/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_security.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
#define MAX_JUMBO_PKT_LEN 9600
#define MEMPOOL_CACHE_SIZE 256
#define NB_MBUF	(32000)
#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_NB_OBJS 1024
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_LCORE_PARAMS 1024
#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
 * Configurable number of RX/TX ring descriptors
#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_SINGLE_SA "single-sa"
#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
#define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
#define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
/* long options mapped to a short option */
/* first long-only option value must be >= 256, so that it won't
 * conflict with short options
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_SINGLE_SA_NUM,
	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
	CMD_LINE_OPT_RX_OFFLOAD_NUM,
	CMD_LINE_OPT_TX_OFFLOAD_NUM,
static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
static uint32_t frame_size;
 * RX/TX HW offload capabilities to enable/use on ethernet ports.
 * By default all capabilities are enabled.
static uint64_t dev_rx_offload = UINT64_MAX;
static uint64_t dev_tx_offload = UINT64_MAX;
/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {.enable = 0};
struct lcore_rx_queue {
} __rte_cache_aligned;
struct lcore_params {
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		.mq_mode = ETH_MQ_TX_NONE,
static struct socket_ctx socket_ctx[NB_SOCKETS];
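/*
 * Helpers to strip L2 padding: if the mbuf is longer than the length
 * reported in the IP header (plus the L2 header), trim the excess bytes
 * so later IPsec processing sees the exact datagram length.
 */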
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
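/*
 * Classify a single received packet: strip the Ethernet header, sort the
 * mbuf into the ip4, ip6 or ipsec traffic type, and, for inline-offloaded
 * packets, recover the SA pointer via the registered session userdata.
 */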
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
	const struct rte_ether_hdr *eth;
	const struct rte_ipv4_hdr *iph4;
	const struct rte_ipv6_hdr *iph6;
	eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
		adjust_ipv4_pktlen(pkt, iph4, 0);
		if (iph4->next_proto_id == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
			t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		pkt->l3_len = sizeof(*iph4);
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		size_t l3len, ext_len;
		/* get protocol type */
		iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
		adjust_ipv6_pktlen(pkt, iph6, 0);
		next_proto = iph6->proto;
		/* determine l3 header size up to ESP extension */
		l3len = sizeof(struct ip6_hdr);
		p = rte_pktmbuf_mtod(pkt, uint8_t *);
		while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
				(next_proto = rte_ipv6_get_next_ext(p + l3len,
					next_proto, &ext_len)) >= 0)
		/* drop packet when IPv6 header exceeds first segment length */
		if (unlikely(l3len > pkt->data_len)) {
			rte_pktmbuf_free(pkt);
		if (next_proto == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
			t->ip6.data[t->ip6.num] = &iph6->proto;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
			rte_be_to_cpu_16(eth->ether_type));
		rte_pktmbuf_free(pkt);
	/* Check if the packet has been processed inline. For inline protocol
	 * processed packets, the metadata in the mbuf can be used to identify
	 * the security processing done on the packet. The metadata will be
	 * used to retrieve the application registered userdata associated
	 * with the security session.
	if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
		struct ipsec_mbuf_metadata *priv;
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(
		/* Retrieve the userdata registered. Here, the userdata
		 * registered is the SA pointer.
		sa = (struct ipsec_sa *)
			rte_security_get_userdata(ctx, pkt->udata64);
			/* userdata could not be retrieved */
		/* Save SA as priv member in mbuf. This will be used in the
		 * IPsec selector(SP-SA) check.
		priv = get_priv(pkt);
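/*
 * Classify a burst of received packets, prefetching PREFETCH_OFFSET
 * packets ahead while prepare_one_packet() runs on the current one.
 */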
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
		prepare_one_packet(pkts[i], t);
	/* Process the remaining packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
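/*
 * Prepare a packet for transmission: prepend a fresh Ethernet header,
 * set l2/l3 lengths and checksum-offload flags (computing the IPv4
 * checksum in software when the TX IP-checksum offload is not set),
 * and fill in the per-port source/destination MAC addresses.
 */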
prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
		const struct lcore_conf *qconf)
	struct rte_ether_hdr *ethhdr;
	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	ethhdr = (struct rte_ether_hdr *)
		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= qconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = RTE_ETHER_HDR_LEN;
		/* calculate IPv4 cksum in SW */
		if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
			ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		pkt->ol_flags |= qconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;
		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
		sizeof(struct rte_ether_addr));
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
		sizeof(struct rte_ether_addr));
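/* Run prepare_tx_pkt() over a burst, prefetching mbufs a couple of entries ahead. */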
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
		const struct lcore_conf *qconf)
	const int32_t prefetch_offset = 2;
	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
		prepare_tx_pkt(pkts[i], port, qconf);
	/* Process the remaining packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port, qconf);
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
	struct rte_mbuf **m_table;
	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
	prepare_tx_burst(m_table, n, port, qconf);
	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
			rte_pktmbuf_free(m_table[ret]);
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port)
	struct lcore_conf *qconf;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
	qconf->tx_mbufs[port].len = len;
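/*
 * Inbound security-policy check: classify packets against the SP ACL,
 * drop DISCARDs, and for IPsec-processed packets verify that the SA
 * index derived from the matched policy's SPI matches the SA the packet
 * was actually processed with.
 */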
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
	uint32_t i, j, res, sa_idx;
	if (ip->num == 0 || sp == NULL)
	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);
	for (i = 0; i < ip->num; i++) {
		if (res == DISCARD) {
		/* Only check SPI match for processed IPSec packets */
		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
		sa_idx = SPI2IDX(res);
		if (!inbound_sa_check(sa, m, sa_idx)) {
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
	for (i = 0; i < num; i++) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			trf->ip4.pkts[n4] = m;
			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
				uint8_t *, offsetof(struct ip, ip_p));
		} else if (ip->ip_v == IP6_VERSION) {
			trf->ip6.pkts[n6] = m;
			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
				offsetof(struct ip6_hdr, ip6_nxt));
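/*
 * Inbound path: decrypt/verify the ipsec traffic (legacy code path when
 * librte_ipsec is disabled, ipsec_process() otherwise), split the result
 * back into ip4/ip6 with split46_traffic(), then apply the inbound SP.
 */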
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
	uint16_t nb_pkts_in, n_ip4, n_ip6;
	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;
	if (app_sa_prm.enable == 0) {
		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);
		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
				traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
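/*
 * Outbound security-policy check: classify against the SP ACL, drop
 * DISCARDs, pass BYPASS packets through, and move packets that need
 * protection into the ipsec traffic type together with the SA index
 * derived from the matched policy.
 */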
outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
		struct traffic_type *ipsec)
	uint32_t i, j, sa_idx;
	if (ip->num == 0 || sp == NULL)
	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);
	for (i = 0; i < ip->num; i++) {
		sa_idx = SPI2IDX(ip->res[i]);
		if (ip->res[i] == DISCARD)
		else if (ip->res[i] == BYPASS)
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;
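/*
 * Outbound path: drop IPsec packets arriving on protected ports, apply
 * the outbound SP for both address families, encrypt the selected
 * packets and re-sort the output into ip4/ip6 for routing.
 */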
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
	uint16_t idx, nb_pkts_out, i;
	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
	traffic->ipsec.num = 0;
	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
	if (app_sa_prm.enable == 0) {
		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
		for (i = 0; i < nb_pkts_out; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
				traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
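/*
 * Inbound path for the --single-sa mode: plain IPv4/IPv6 traffic from
 * unprotected ports is dropped and only ESP packets are processed,
 * bypassing the SP lookup entirely.
 */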
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
	uint32_t nb_pkts_in, i, idx;
	/* Drop any IPv4 traffic from unprotected ports */
	for (i = 0; i < traffic->ip4.num; i++)
		rte_pktmbuf_free(traffic->ip4.pkts[i]);
	traffic->ip4.num = 0;
	/* Drop any IPv6 traffic from unprotected ports */
	for (i = 0; i < traffic->ip6.num; i++)
		rte_pktmbuf_free(traffic->ip6.pkts[i]);
	traffic->ip6.num = 0;
	if (app_sa_prm.enable == 0) {
		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);
		for (i = 0; i < nb_pkts_in; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
				traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
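/*
 * Outbound path for the --single-sa mode: all ip4 and ip6 traffic is
 * funneled through the single SA given by single_sa_idx, with no SP
 * lookup.
 */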
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
	uint32_t nb_pkts_out, i, n;
	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
	for (i = 0; i < traffic->ip4.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	for (i = 0; i < traffic->ip6.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	traffic->ip4.num = 0;
	traffic->ip6.num = 0;
	traffic->ipsec.num = n;
	if (app_sa_prm.enable == 0) {
		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
		/* They all use the same SA (ip4 or ip6 tunnel) */
		m = traffic->ipsec.pkts[0];
		ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			traffic->ip4.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
			traffic->ip6.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
				traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
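/*
 * For inline-offloaded packets the output port is stored in the SA
 * (saved earlier in the mbuf private area), so no LPM lookup is needed.
 */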
static inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
	struct ipsec_mbuf_metadata *priv;
	priv = get_priv(pkt);
	if (unlikely(sa == NULL)) {
		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
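/*
 * Route a burst: packets with PKT_TX_SEC_OFFLOAD take the port recorded
 * in their SA, everything else goes through a bulk LPM (LPM6 for the
 * IPv6 variant below) lookup on the destination address; packets
 * without a route are freed.
 */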
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	uint16_t lpm_pkts = 0;
	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			offset = offsetof(struct ip, ip_dst);
			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			rte_pktmbuf_free(pkts[i]);
		send_single_packet(pkts[i], pkt_hop & 0xff);
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
	int32_t hop[MAX_PKT_BURST * 2];
	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
	uint16_t lpm_pkts = 0;
	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			offset = offsetof(struct ip6_hdr, ip6_dst);
			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
			rte_pktmbuf_free(pkts[i]);
		send_single_packet(pkts[i], pkt_hop & 0xff);
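/*
 * Per-burst entry point: classify the burst, run it through the inbound
 * or outbound (single-SA or SP-based) path depending on the port, then
 * route the resulting ip4/ip6 traffic.
 */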
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint16_t portid)
	struct ipsec_traffic traffic;
	prepare_traffic(pkts, &traffic, nb_pkts);
	if (unlikely(single_sa)) {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
			process_pkts_outbound(&qconf->outbound, &traffic);
	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
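/* Flush any partially filled per-port TX buffers. */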
drain_tx_buffers(struct lcore_conf *qconf)
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		send_burst(qconf, buf->len, portid);
drain_crypto_buffers(struct lcore_conf *qconf)
	struct ipsec_ctx *ctx;
	/* drain inbound buffers */
	ctx = &qconf->inbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	/* drain outbound buffers */
	ctx = &qconf->outbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
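/*
 * Dequeue completed crypto-ops (legacy path) or finish librte_ipsec
 * processing, then run the usual inbound SP check and routing; the
 * outbound counterpart below does the same minus the SP check.
 */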
drain_inbound_crypto_queues(const struct lcore_conf *qconf,
	struct ipsec_ctx *ctx)
	struct ipsec_traffic trf;
	if (app_sa_prm.enable == 0) {
		/* dequeue packets from crypto-queue */
		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_DIM(trf.ipsec.pkts));
		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
		ipsec_cqp_process(ctx, &trf);
	/* process ipv4 packets */
	if (trf.ip4.num != 0) {
		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
	/* process ipv6 packets */
	if (trf.ip6.num != 0) {
		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
drain_outbound_crypto_queues(const struct lcore_conf *qconf,
	struct ipsec_ctx *ctx)
	struct ipsec_traffic trf;
	if (app_sa_prm.enable == 0) {
		/* dequeue packets from crypto-queue */
		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_DIM(trf.ipsec.pkts));
		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
		ipsec_cqp_process(ctx, &trf);
	/* process ipv4 packets */
	if (trf.ip4.num != 0)
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
	/* process ipv6 packets */
	if (trf.ip6.num != 0)
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
/* main processing loop */
main_loop(__attribute__((unused)) void *dummy)
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
		/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);
	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->inbound.session_priv_pool =
		socket_ctx[socket_id].session_priv_pool;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;
	qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->outbound.session_priv_pool =
		socket_ctx[socket_id].session_priv_pool;
	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
		cur_tsc = rte_rdtsc();
		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			drain_crypto_buffers(qconf);
		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);
				process_pkts(qconf, pkts, nb_rx, portid);
			/* dequeue and process completed crypto-ops */
			if (UNPROTECTED_PORT(portid))
				drain_inbound_crypto_queues(qconf,
				drain_outbound_crypto_queues(qconf,
	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("port %u is not present on the board\n", portid);
get_port_nb_rx_queues(const uint16_t port)
	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	return (uint8_t)(++queue);
init_lcore_rx_queues(void)
	uint16_t i, nb_rx_queue;
	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				nb_rx_queue + 1, lcore);
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
print_usage(const char *prgname)
	fprintf(stderr, "%s [EAL options] --"
		" [-w REPLAY_WINDOW_SIZE]"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--single-sa SAIDX]"
		" [--cryptodev_mask MASK]"
		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		"  -P : Enable promiscuous mode\n"
		"  -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
		"  -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
		"  -l enables code-path that uses librte_ipsec\n"
		"  -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
		"     size for each SA\n"
		"  -a enables SA SQN atomic behaviour\n"
		"  -f CONFIG_FILE: Configuration file\n"
		"  --config (port,queue,lcore): Rx queue configuration\n"
		"  --single-sa SAIDX: Use single SA index for outbound traffic,\n"
		"     bypassing the SP\n"
		"  --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
		"     devices to configure\n"
		"  --" CMD_LINE_OPT_RX_OFFLOAD
		": bitmask of the RX HW offload capabilities to enable/use\n"
		"     (DEV_RX_OFFLOAD_*)\n"
		"  --" CMD_LINE_OPT_TX_OFFLOAD
		": bitmask of the TX HW offload capabilities to enable/use\n"
		"     (DEV_TX_OFFLOAD_*)\n"
parse_mask(const char *str, uint64_t *val)
	t = strtoul(str, &end, 0);
	if (errno != 0 || end[0] != 0)
parse_portmask(const char *portmask)
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
	if ((pm == 0) && errno)
parse_decimal(const char *str)
	num = strtoul(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
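/*
 * Parse the --config option, e.g. --config="(0,0,1),(1,0,2)" maps
 * port 0 queue 0 to lcore 1 and port 1 queue 0 to lcore 2.
 */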
parse_config(const char *q_arg)
	const char *p, *p0 = q_arg;
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	nb_lcore_params = 0;
	while ((p = strchr(p0, '(')) != NULL) {
		p0 = strchr(p, ')');
		if (size >= sizeof(s))
		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
		for (i = 0; i < _NUM_FLD; i++) {
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
	lcore_params = lcore_params_array;
print_app_sa_prm(const struct app_sa_prm *prm)
	printf("librte_ipsec usage: %s\n",
		(prm->enable == 0) ? "disabled" : "enabled");
	if (prm->enable == 0)
	printf("replay window size: %u\n", prm->window_size);
	printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
	printf("SA flags: %#" PRIx64 "\n", prm->flags);
parse_args(int32_t argc, char **argv)
	int32_t option_index;
	char *prgname = argv[0];
	int32_t f_present = 0;
	while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
			lgopts, &option_index)) != EOF) {
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
			printf("Promiscuous mode selected\n");
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
			if (f_present == 1) {
				printf("\"-f\" option present more than "
				print_usage(prgname);
			if (parse_cfg_file(optarg) < 0) {
				printf("parsing file \"%s\" failed\n",
				print_usage(prgname);
			int32_t size = parse_decimal(optarg);
				printf("Invalid jumbo frame size\n");
				print_usage(prgname);
				printf("Using default value 9000\n");
			printf("Enabled jumbo frames size %u\n", frame_size);
			app_sa_prm.enable = 1;
			app_sa_prm.enable = 1;
			app_sa_prm.window_size = parse_decimal(optarg);
			app_sa_prm.enable = 1;
			app_sa_prm.enable_esn = 1;
			app_sa_prm.enable = 1;
			app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
				printf("Invalid config\n");
				print_usage(prgname);
		case CMD_LINE_OPT_SINGLE_SA_NUM:
			ret = parse_decimal(optarg);
				printf("Invalid argument[sa_idx]\n");
				print_usage(prgname);
			single_sa_idx = ret;
			printf("Configured with single SA index %u\n",
		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
			ret = parse_portmask(optarg);
				printf("Invalid argument[portmask]\n");
				print_usage(prgname);
			enabled_cryptodev_mask = ret;
		case CMD_LINE_OPT_RX_OFFLOAD_NUM:
			ret = parse_mask(optarg, &dev_rx_offload);
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_RX_OFFLOAD, optarg);
				print_usage(prgname);
		case CMD_LINE_OPT_TX_OFFLOAD_NUM:
			ret = parse_mask(optarg, &dev_tx_offload);
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_TX_OFFLOAD, optarg);
				print_usage(prgname);
			print_usage(prgname);
	if (f_present == 0) {
		printf("Mandatory option \"-f\" not present\n");
	print_app_sa_prm(&app_sa_prm);
	argv[optind-1] = prgname;
	optind = 1; /* reset getopt lib */
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
 * Update destination ethaddr for the port.
add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
	if (port >= RTE_DIM(ethaddr_tbl))
	ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
/* Check the link status of all ports in up to 9s, and print them finally */
check_all_ports_link_status(uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	printf("\nChecking link status");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					"Port%d Link Up - speed %u Mbps -%s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1528 ("full-duplex") : ("half-duplex\n"));
1530 printf("Port %d Link Down\n", portid);
1533 /* clear all_ports_up flag if any link down */
1534 if (link.link_status == ETH_LINK_DOWN) {
1539 /* after finally printing all link status, get out */
1540 if (print_flag == 1)
1543 if (all_ports_up == 0) {
1546 rte_delay_ms(CHECK_INTERVAL);
1549 /* set the print_flag if all ports up or timeout */
1550 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
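/*
 * Record a (lcore, cipher/auth/aead algo) -> queue-pair mapping in the
 * cdev_map hash and reserve the queue pair in the lcore's ipsec_ctx
 * table.
 */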
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth,
		const struct rte_cryptodev_capabilities *aead)
	struct cdev_key key = { 0 };
	key.lcore_id = params->lcore_id;
		key.cipher_algo = cipher->sym.cipher.algo;
		key.auth_algo = auth->sym.auth.algo;
		key.aead_algo = aead->sym.aead.algo;
	ret = rte_hash_lookup(map, &key);
	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
			"(cdev_id_qp %lu)\n", str, key.lcore_id,
	ret = rte_hash_add_key_data(map, &key, (void *)i);
1601 printf("Faled to insert cdev mapping for (lcore %u, "
1602 "cdev %u, qp %u), errno %d\n",
1603 key.lcore_id, ipsec_ctx->tbl[i].id,
1604 ipsec_ctx->tbl[i].qp, ret);
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	qconf = &lcore_conf[params->lcore_id];
	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		ipsec_ctx = &qconf->outbound;
		ipsec_ctx = &qconf->inbound;
	/* Require cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, NULL, NULL, i);
		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, i, j, NULL);
/* Check if the device is enabled by cryptodev_mask */
check_cryptodev_mask(uint8_t cdev_id)
	if (enabled_cryptodev_mask & (1 << cdev_id))
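/*
 * Create the in/out cdev_map hash tables, size the session pools from
 * the largest crypto/security session found, distribute queue pairs
 * across the configured lcores, then configure and start every enabled
 * cryptodev.
 */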
cryptodevs_init(void)
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, i;
	int16_t cdev_id, port_id;
	struct rte_hash_parameters params = { 0 };
	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();
	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
	printf("lcore/cryptodev/qp mappings:\n");
	uint32_t max_sess_sz = 0, sess_sz;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		/* Get crypto priv session size */
		sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
		/*
		 * If crypto device is security capable, need to check the
		 * size of security session as well.
		/* Get security context of the crypto device */
		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
		if (sec_ctx == NULL)
		/* Get size of security session */
		sess_sz = rte_security_session_get_size(sec_ctx);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((enabled_port_mask & (1 << port_id)) == 0)
		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
		if (sec_ctx == NULL)
		sess_sz = rte_security_session_get_size(sec_ctx);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		struct rte_cryptodev_info cdev_info;
		if (check_cryptodev_mask((uint8_t)cdev_id))
		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
			max_nb_qps = cdev_info.max_nb_queue_pairs;
			max_nb_qps = nb_lcore_params;
		while (qp < max_nb_qps && i < nb_lcore_params) {
			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
					&lcore_params[idx]))
			idx = idx % nb_lcore_params;
		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
		dev_conf.nb_queue_pairs = qp;
		dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
		if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
			rte_exit(EXIT_FAILURE,
				"Device does not support at least %u "
				"sessions", CDEV_MP_NB_OBJS);
		if (!socket_ctx[dev_conf.socket_id].session_pool) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;
			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
					"sess_mp_%u", dev_conf.socket_id);
			sess_mp = rte_cryptodev_sym_session_pool_create(
					mp_name, CDEV_MP_NB_OBJS,
					0, CDEV_MP_CACHE_SZ, 0,
					dev_conf.socket_id);
			socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
		if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;
			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
					"sess_mp_priv_%u", dev_conf.socket_id);
			sess_mp = rte_mempool_create(mp_name,
					0, NULL, NULL, NULL,
					NULL, dev_conf.socket_id,
			socket_ctx[dev_conf.socket_id].session_priv_pool =
		if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
				!socket_ctx[dev_conf.socket_id].session_pool)
			rte_exit(EXIT_FAILURE,
				"Cannot create session pool on socket %d\n",
				dev_conf.socket_id);
		printf("Allocated session pool on socket %d\n",
			dev_conf.socket_id);
		if (rte_cryptodev_configure(cdev_id, &dev_conf))
			rte_panic("Failed to initialize cryptodev %u\n",
		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
		qp_conf.mp_session =
			socket_ctx[dev_conf.socket_id].session_pool;
		qp_conf.mp_session_private =
			socket_ctx[dev_conf.socket_id].session_priv_pool;
		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
					&qp_conf, dev_conf.socket_id))
1836 rte_panic("Failed to setup queue %u for "
1837 "cdev_id %u\n", 0, cdev_id);
		if (rte_cryptodev_start(cdev_id))
			rte_panic("Failed to start cryptodev %u\n",
	/* create session pools for eth devices that implement security */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((enabled_port_mask & (1 << port_id)) &&
				rte_eth_dev_get_sec_ctx(port_id)) {
			int socket_id = rte_eth_dev_socket_id(port_id);
			if (!socket_ctx[socket_id].session_priv_pool) {
				char mp_name[RTE_MEMPOOL_NAMESIZE];
				struct rte_mempool *sess_mp;
				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
						"sess_mp_%u", socket_id);
				sess_mp = rte_mempool_create(mp_name,
						(CDEV_MP_NB_OBJS * 2),
						0, NULL, NULL, NULL,
				if (sess_mp == NULL)
					rte_exit(EXIT_FAILURE,
						"Cannot create session pool "
						"on socket %d\n", socket_id);
				printf("Allocated session pool "
					"on socket %d\n", socket_id);
				socket_ctx[socket_id].session_priv_pool =
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct rte_ether_addr ethaddr;
	struct rte_eth_conf local_port_conf = port_conf;
	rte_eth_dev_info_get(portid, &dev_info);
	/* limit allowed HW offloads, as the user requested */
	dev_info.rx_offload_capa &= dev_rx_offload;
	dev_info.tx_offload_capa &= dev_tx_offload;
	printf("Configuring device port %u:\n", portid);
	rte_eth_macaddr_get(portid, &ethaddr);
	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;
	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
			"(max rx queue is %u)\n",
			nb_rx_queue, dev_info.max_rx_queues);
	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
			"(max tx queue is %u)\n",
			nb_tx_queue, dev_info.max_tx_queues);
	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
		nb_rx_queue, nb_tx_queue);
		local_port_conf.rxmode.max_rx_pkt_len = frame_size;
		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	local_port_conf.rxmode.offloads |= req_rx_offloads;
	local_port_conf.txmode.offloads |= req_tx_offloads;
	/* Check that all required capabilities are supported */
	if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
			local_port_conf.rxmode.offloads)
		rte_exit(EXIT_FAILURE,
			"Error: port %u required RX offloads: 0x%" PRIx64
1936 ", avaialbe RX offloads: 0x%" PRIx64 "\n",
			portid, local_port_conf.rxmode.offloads,
			dev_info.rx_offload_capa);
	if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
			local_port_conf.txmode.offloads)
		rte_exit(EXIT_FAILURE,
			"Error: port %u required TX offloads: 0x%" PRIx64
1944 ", avaialbe TX offloads: 0x%" PRIx64 "\n",
			portid, local_port_conf.txmode.offloads,
			dev_info.tx_offload_capa);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
1955 printf("port %u configurng rx_offloads=0x%" PRIx64
1956 ", tx_offloads=0x%" PRIx64 "\n",
1957 portid, local_port_conf.rxmode.offloads,
1958 local_port_conf.txmode.offloads);
1960 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1961 dev_info.flow_type_rss_offloads;
1962 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1963 port_conf.rx_adv_conf.rss_conf.rss_hf) {
1964 printf("Port %u modified RSS hash function based on hardware support,"
1965 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
1967 port_conf.rx_adv_conf.rss_conf.rss_hf,
1968 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1971 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1974 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1975 "err=%d, port=%d\n", ret, portid);
1977 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
1979 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
1980 "err=%d, port=%d\n", ret, portid);
1982 /* init one TX queue per lcore */
1984 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1985 if (rte_lcore_is_enabled(lcore_id) == 0)
1989 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1994 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1996 txconf = &dev_info.default_txconf;
1997 txconf->offloads = local_port_conf.txmode.offloads;
1999 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2002 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2003 "err=%d, port=%d\n", ret, portid);
2005 qconf = &lcore_conf[lcore_id];
2006 qconf->tx_queue_id[portid] = tx_queueid;
2008 /* Pre-populate pkt offloads based on capabilities */
2009 qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
2010 qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
2011 if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
2012 qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
2016 /* init RX queues */
2017 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2018 struct rte_eth_rxconf rxq_conf;
2020 if (portid != qconf->rx_queue_list[queue].port_id)
2023 rx_queueid = qconf->rx_queue_list[queue].queue_id;
2025 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2028 rxq_conf = dev_info.default_rxconf;
2029 rxq_conf.offloads = local_port_conf.rxmode.offloads;
2030 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2031 nb_rxd, socket_id, &rxq_conf,
2032 socket_ctx[socket_id].mbuf_pool);
2034 rte_exit(EXIT_FAILURE,
2035 "rte_eth_rx_queue_setup: err=%d, "
2036 "port=%d\n", ret, portid);
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
	uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
			RTE_MBUF_DEFAULT_BUF_SIZE;
	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
	if (ctx->mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
	printf("Allocated mbuf pool on socket %d\n", socket_id);
inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
	struct ipsec_sa *sa;
	/* For inline protocol processing, the metadata in the event will
	 * uniquely identify the security session which raised the event.
	 * Application would then need the userdata it had registered with the
	 * security session to process the event.
	sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
		/* userdata could not be retrieved */
	/* Sequence number overflow. The SA needs to be re-established */
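/*
 * Ethdev callback for RTE_ETH_EVENT_IPSEC events; currently only the
 * ESN-overflow subtype is acted upon.
 */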
inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param)
	struct rte_eth_event_ipsec_desc *event_desc = NULL;
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(port_id);
	RTE_SET_USED(param);
	if (type != RTE_ETH_EVENT_IPSEC)
	event_desc = ret_param;
	if (event_desc == NULL) {
		printf("Event descriptor not set\n");
	md = event_desc->metadata;
	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
		return inline_ipsec_event_esn_overflow(ctx, md);
	else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
		printf("Invalid IPsec event reported\n");
main(int32_t argc, char **argv)
	uint64_t req_rx_offloads, req_tx_offloads;
	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
			unprotected_port_mask);
	if (check_params() < 0)
		rte_exit(EXIT_FAILURE, "check_params failed\n");
	ret = init_lcore_rx_queues();
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
	nb_lcores = rte_lcore_count();
	/* Replicate each context per socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
		socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		if (socket_ctx[socket_id].mbuf_pool)
		sp4_init(&socket_ctx[socket_id], socket_id);
		sp6_init(&socket_ctx[socket_id], socket_id);
		sa_init(&socket_ctx[socket_id], socket_id);
		rt_init(&socket_ctx[socket_id], socket_id);
		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
		sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
		port_init(portid, req_rx_offloads, req_tx_offloads);
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
		ret = rte_eth_dev_start(portid);
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
				"err=%d, port=%d\n", ret, portid);
		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
			rte_eth_promiscuous_enable(portid);
		rte_eth_dev_callback_register(portid,
			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
	check_all_ports_link_status(enabled_port_mask);
	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)