1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
14 #include <sys/queue.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
48 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
50 #define MAX_JUMBO_PKT_LEN 9600
52 #define MEMPOOL_CACHE_SIZE 256
54 #define NB_MBUF (32000)
56 #define CDEV_QUEUE_DESC 2048
57 #define CDEV_MAP_ENTRIES 16384
58 #define CDEV_MP_NB_OBJS 1024
59 #define CDEV_MP_CACHE_SZ 64
60 #define MAX_QUEUE_PAIRS 1
62 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
66 /* Configure how many packets ahead to prefetch when reading packets */
67 #define PREFETCH_OFFSET 3
69 #define MAX_RX_QUEUE_PER_LCORE 16
71 #define MAX_LCORE_PARAMS 1024
73 #define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
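/* For example, with unprotected_port_mask == 0x1, UNPROTECTED_PORT(0) is non-zero,
 * so traffic received on port 0 takes the inbound (decrypt) path in process_pkts().
 */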
76 * Configurable number of RX/TX ring descriptors
78 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
79 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
80 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
81 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
83 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
84 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
85 (((uint64_t)((a) & 0xff) << 56) | \
86 ((uint64_t)((b) & 0xff) << 48) | \
87 ((uint64_t)((c) & 0xff) << 40) | \
88 ((uint64_t)((d) & 0xff) << 32) | \
89 ((uint64_t)((e) & 0xff) << 24) | \
90 ((uint64_t)((f) & 0xff) << 16) | \
91 ((uint64_t)((g) & 0xff) << 8) | \
92 ((uint64_t)(h) & 0xff))
94 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
95 (((uint64_t)((h) & 0xff) << 56) | \
96 ((uint64_t)((g) & 0xff) << 48) | \
97 ((uint64_t)((f) & 0xff) << 40) | \
98 ((uint64_t)((e) & 0xff) << 32) | \
99 ((uint64_t)((d) & 0xff) << 24) | \
100 ((uint64_t)((c) & 0xff) << 16) | \
101 ((uint64_t)((b) & 0xff) << 8) | \
102 ((uint64_t)(a) & 0xff))
104 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
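/* Illustrative example: ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) packs the
 * address 00:16:3e:7e:94:9a into the first six bytes of a uint64_t (in memory
 * order on either endianness), so it can be copied straight into the Ethernet
 * header as a struct ether_addr.
 */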
106 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
107 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
108 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
109 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
112 /* per-port source (the port's own) and destination ethernet addresses */
113 struct ethaddr_info {
117 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
118 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
119 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
120 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
121 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
124 #define CMD_LINE_OPT_CONFIG "config"
125 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
126 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
127 #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
128 #define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
131 /* long options mapped to a short option */
133 /* the first long-only option value must be >= 256, so that we won't
134 * conflict with short options
136 CMD_LINE_OPT_MIN_NUM = 256,
137 CMD_LINE_OPT_CONFIG_NUM,
138 CMD_LINE_OPT_SINGLE_SA_NUM,
139 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
140 CMD_LINE_OPT_RX_OFFLOAD_NUM,
141 CMD_LINE_OPT_TX_OFFLOAD_NUM,
144 static const struct option lgopts[] = {
145 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
146 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
147 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
148 {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
149 {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
153 /* mask of enabled ports */
154 static uint32_t enabled_port_mask;
155 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
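/* All crypto devices are allowed by default (all-ones mask);
 * --cryptodev_mask restricts which cdev ids are used.
 */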
156 static uint32_t unprotected_port_mask;
157 static int32_t promiscuous_on = 1;
158 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
159 static uint32_t nb_lcores;
160 static uint32_t single_sa;
161 static uint32_t single_sa_idx;
162 static uint32_t frame_size;
165 * RX/TX HW offload capabilities to enable/use on ethernet ports.
166 * By default all capabilities are enabled.
168 static uint64_t dev_rx_offload = UINT64_MAX;
169 static uint64_t dev_tx_offload = UINT64_MAX;
171 struct lcore_rx_queue {
174 } __rte_cache_aligned;
176 struct lcore_params {
180 } __rte_cache_aligned;
182 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
184 static struct lcore_params *lcore_params;
185 static uint16_t nb_lcore_params;
187 static struct rte_hash *cdev_map_in;
188 static struct rte_hash *cdev_map_out;
192 struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
196 uint16_t nb_rx_queue;
197 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
198 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
199 struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
200 struct ipsec_ctx inbound;
201 struct ipsec_ctx outbound;
202 struct rt_ctx *rt4_ctx;
203 struct rt_ctx *rt6_ctx;
204 } __rte_cache_aligned;
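/* Per-lcore run-time state, indexed by lcore id: assigned RX queues, per-port
 * TX staging buffers and the inbound/outbound IPsec and routing contexts.
 */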
206 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
208 static struct rte_eth_conf port_conf = {
210 .mq_mode = ETH_MQ_RX_RSS,
211 .max_rx_pkt_len = ETHER_MAX_LEN,
213 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
218 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
219 ETH_RSS_TCP | ETH_RSS_SCTP,
223 .mq_mode = ETH_MQ_TX_NONE,
227 static struct socket_ctx socket_ctx[NB_SOCKETS];
229 struct traffic_type {
230 const uint8_t *data[MAX_PKT_BURST * 2];
231 struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
232 uint32_t res[MAX_PKT_BURST * 2];
236 struct ipsec_traffic {
237 struct traffic_type ipsec;
238 struct traffic_type ip4;
239 struct traffic_type ip6;
243 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
246 struct ether_hdr *eth;
248 eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
249 if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
250 nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
251 nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
252 if (*nlp == IPPROTO_ESP)
253 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
255 t->ip4.data[t->ip4.num] = nlp;
256 t->ip4.pkts[(t->ip4.num)++] = pkt;
258 } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
259 nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
260 nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
261 if (*nlp == IPPROTO_ESP)
262 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
264 t->ip6.data[t->ip6.num] = nlp;
265 t->ip6.pkts[(t->ip6.num)++] = pkt;
268 /* Unknown/Unsupported type, drop the packet */
269 RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
270 rte_pktmbuf_free(pkt);
273 /* Check if the packet has been processed inline. For inline protocol
274 * processed packets, the metadata in the mbuf can be used to identify
275 * the security processing done on the packet. The metadata will be
276 * used to retrieve the application registered userdata associated
277 * with the security session.
280 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
282 struct ipsec_mbuf_metadata *priv;
283 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
284 rte_eth_dev_get_sec_ctx(
287 /* Retrieve the userdata registered. Here, the userdata
288 * registered is the SA pointer.
291 sa = (struct ipsec_sa *)
292 rte_security_get_userdata(ctx, pkt->udata64);
295 /* userdata could not be retrieved */
299 /* Save SA as priv member in mbuf. This will be used in the
300 * IPsec selector(SP-SA) check.
303 priv = get_priv(pkt);
309 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
318 for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
319 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
321 prepare_one_packet(pkts[i], t);
323 /* Process remaining packets */
324 for (; i < nb_pkts; i++)
325 prepare_one_packet(pkts[i], t);
329 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
330 const struct lcore_conf *qconf)
333 struct ether_hdr *ethhdr;
335 ip = rte_pktmbuf_mtod(pkt, struct ip *);
337 ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
339 if (ip->ip_v == IPVERSION) {
340 pkt->ol_flags |= qconf->outbound.ipv4_offloads;
341 pkt->l3_len = sizeof(struct ip);
342 pkt->l2_len = ETHER_HDR_LEN;
346 /* calculate IPv4 cksum in SW */
347 if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
348 ip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);
350 ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
352 pkt->ol_flags |= qconf->outbound.ipv6_offloads;
353 pkt->l3_len = sizeof(struct ip6_hdr);
354 pkt->l2_len = ETHER_HDR_LEN;
356 ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
359 memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
360 sizeof(struct ether_addr));
361 memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
362 sizeof(struct ether_addr));
366 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
367 const struct lcore_conf *qconf)
370 const int32_t prefetch_offset = 2;
372 for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
373 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
374 prepare_tx_pkt(pkts[i], port, qconf);
376 /* Process remaining packets */
377 for (; i < nb_pkts; i++)
378 prepare_tx_pkt(pkts[i], port, qconf);
381 /* Send burst of packets on an output interface */
382 static inline int32_t
383 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
385 struct rte_mbuf **m_table;
389 queueid = qconf->tx_queue_id[port];
390 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
392 prepare_tx_burst(m_table, n, port, qconf);
394 ret = rte_eth_tx_burst(port, queueid, m_table, n);
395 if (unlikely(ret < n)) {
397 rte_pktmbuf_free(m_table[ret]);
404 /* Enqueue a single packet, and send burst if queue is filled */
405 static inline int32_t
406 send_single_packet(struct rte_mbuf *m, uint16_t port)
410 struct lcore_conf *qconf;
412 lcore_id = rte_lcore_id();
414 qconf = &lcore_conf[lcore_id];
415 len = qconf->tx_mbufs[port].len;
416 qconf->tx_mbufs[port].m_table[len] = m;
419 /* enough pkts to be sent */
420 if (unlikely(len == MAX_PKT_BURST)) {
421 send_burst(qconf, MAX_PKT_BURST, port);
425 qconf->tx_mbufs[port].len = len;
430 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
434 uint32_t i, j, res, sa_idx;
436 if (ip->num == 0 || sp == NULL)
439 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
440 ip->num, DEFAULT_MAX_CATEGORIES);
443 for (i = 0; i < ip->num; i++) {
455 /* Only check SPI match for processed IPSec packets */
456 if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
461 sa_idx = ip->res[i] & PROTECT_MASK;
462 if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
463 !inbound_sa_check(sa, m, sa_idx)) {
473 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
482 for (i = 0; i < num; i++) {
485 ip = rte_pktmbuf_mtod(m, struct ip *);
487 if (ip->ip_v == IPVERSION) {
488 trf->ip4.pkts[n4] = m;
489 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
490 uint8_t *, offsetof(struct ip, ip_p));
492 } else if (ip->ip_v == IP6_VERSION) {
493 trf->ip6.pkts[n6] = m;
494 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
496 offsetof(struct ip6_hdr, ip6_nxt));
508 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
509 struct ipsec_traffic *traffic)
511 uint16_t nb_pkts_in, n_ip4, n_ip6;
513 n_ip4 = traffic->ip4.num;
514 n_ip6 = traffic->ip6.num;
516 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
517 traffic->ipsec.num, MAX_PKT_BURST);
519 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
521 inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
524 inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
529 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
530 struct traffic_type *ipsec)
533 uint32_t i, j, sa_idx;
535 if (ip->num == 0 || sp == NULL)
538 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
539 ip->num, DEFAULT_MAX_CATEGORIES);
542 for (i = 0; i < ip->num; i++) {
544 sa_idx = ip->res[i] & PROTECT_MASK;
545 if (ip->res[i] & DISCARD)
547 else if (ip->res[i] & BYPASS)
549 else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
550 ipsec->res[ipsec->num] = sa_idx;
551 ipsec->pkts[ipsec->num++] = m;
552 } else /* invalid SA idx */
559 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
560 struct ipsec_traffic *traffic)
563 uint16_t idx, nb_pkts_out, i;
565 /* Drop any IPsec traffic from protected ports */
566 for (i = 0; i < traffic->ipsec.num; i++)
567 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
569 traffic->ipsec.num = 0;
571 outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
573 outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
575 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
576 traffic->ipsec.res, traffic->ipsec.num,
579 for (i = 0; i < nb_pkts_out; i++) {
580 m = traffic->ipsec.pkts[i];
581 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
582 if (ip->ip_v == IPVERSION) {
583 idx = traffic->ip4.num++;
584 traffic->ip4.pkts[idx] = m;
586 idx = traffic->ip6.num++;
587 traffic->ip6.pkts[idx] = m;
593 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
594 struct ipsec_traffic *traffic)
597 uint32_t nb_pkts_in, i, idx;
599 /* Drop any IPv4 traffic from unprotected ports */
600 for (i = 0; i < traffic->ip4.num; i++)
601 rte_pktmbuf_free(traffic->ip4.pkts[i]);
603 traffic->ip4.num = 0;
605 /* Drop any IPv6 traffic from unprotected ports */
606 for (i = 0; i < traffic->ip6.num; i++)
607 rte_pktmbuf_free(traffic->ip6.pkts[i]);
609 traffic->ip6.num = 0;
611 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
612 traffic->ipsec.num, MAX_PKT_BURST);
614 for (i = 0; i < nb_pkts_in; i++) {
615 m = traffic->ipsec.pkts[i];
616 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
617 if (ip->ip_v == IPVERSION) {
618 idx = traffic->ip4.num++;
619 traffic->ip4.pkts[idx] = m;
621 idx = traffic->ip6.num++;
622 traffic->ip6.pkts[idx] = m;
628 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
629 struct ipsec_traffic *traffic)
632 uint32_t nb_pkts_out, i, n;
635 /* Drop any IPsec traffic from protected ports */
636 for (i = 0; i < traffic->ipsec.num; i++)
637 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
641 for (i = 0; i < traffic->ip4.num; i++) {
642 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
643 traffic->ipsec.res[n++] = single_sa_idx;
646 for (i = 0; i < traffic->ip6.num; i++) {
647 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
648 traffic->ipsec.res[n++] = single_sa_idx;
651 traffic->ip4.num = 0;
652 traffic->ip6.num = 0;
653 traffic->ipsec.num = n;
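/* Single-SA mode: every IPv4/IPv6 packet is queued for outbound IPsec
 * processing with the same SA index.
 */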
655 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
656 traffic->ipsec.res, traffic->ipsec.num,
659 /* They all use the same SA (ip4 or ip6 tunnel) */
660 m = traffic->ipsec.pkts[i];
661 ip = rte_pktmbuf_mtod(m, struct ip *);
662 if (ip->ip_v == IPVERSION) {
663 traffic->ip4.num = nb_pkts_out;
664 for (i = 0; i < nb_pkts_out; i++)
665 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
667 traffic->ip6.num = nb_pkts_out;
668 for (i = 0; i < nb_pkts_out; i++)
669 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
673 static inline int32_t
674 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
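/* Inline-offloaded packets carry their SA in the mbuf private data (see
 * get_priv()); the output port recorded in that SA is returned with
 * RTE_LPM_LOOKUP_SUCCESS set, so callers can treat it like a successful
 * LPM lookup.
 */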
676 struct ipsec_mbuf_metadata *priv;
679 priv = get_priv(pkt);
682 if (unlikely(sa == NULL)) {
683 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
691 return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
702 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
704 uint32_t hop[MAX_PKT_BURST * 2];
705 uint32_t dst_ip[MAX_PKT_BURST * 2];
708 uint16_t lpm_pkts = 0;
713 /* Need to do an LPM lookup for non-inline packets. Inline packets will
714 * have port ID in the SA
717 for (i = 0; i < nb_pkts; i++) {
718 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
719 /* Security offload not enabled. So an LPM lookup is
720 * required to get the hop
722 offset = offsetof(struct ip, ip_dst);
723 dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
725 dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
730 rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
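/* Second pass: take the hop from the SA for inline-offloaded packets,
 * otherwise from the bulk LPM result; packets without a valid route are dropped.
 */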
734 for (i = 0; i < nb_pkts; i++) {
735 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
736 /* Read hop from the SA */
737 pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
739 /* Need to use hop returned by lookup */
740 pkt_hop = hop[lpm_pkts++];
743 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
744 rte_pktmbuf_free(pkts[i]);
747 send_single_packet(pkts[i], pkt_hop & 0xff);
752 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
754 int32_t hop[MAX_PKT_BURST * 2];
755 uint8_t dst_ip[MAX_PKT_BURST * 2][16];
759 uint16_t lpm_pkts = 0;
764 /* Need to do an LPM lookup for non-inline packets. Inline packets will
765 * have port ID in the SA
768 for (i = 0; i < nb_pkts; i++) {
769 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
770 /* Security offload not enabled. So an LPM lookup is
771 * required to get the hop
773 offset = offsetof(struct ip6_hdr, ip6_dst);
774 ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
776 memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
781 rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
786 for (i = 0; i < nb_pkts; i++) {
787 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
788 /* Read hop from the SA */
789 pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
791 /* Need to use hop returned by lookup */
792 pkt_hop = hop[lpm_pkts++];
796 rte_pktmbuf_free(pkts[i]);
799 send_single_packet(pkts[i], pkt_hop & 0xff);
804 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
805 uint8_t nb_pkts, uint16_t portid)
807 struct ipsec_traffic traffic;
809 prepare_traffic(pkts, &traffic, nb_pkts);
811 if (unlikely(single_sa)) {
812 if (UNPROTECTED_PORT(portid))
813 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
815 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
817 if (UNPROTECTED_PORT(portid))
818 process_pkts_inbound(&qconf->inbound, &traffic);
820 process_pkts_outbound(&qconf->outbound, &traffic);
823 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
824 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
828 drain_tx_buffers(struct lcore_conf *qconf)
833 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
834 buf = &qconf->tx_mbufs[portid];
837 send_burst(qconf, buf->len, portid);
843 drain_crypto_buffers(struct lcore_conf *qconf)
846 struct ipsec_ctx *ctx;
848 /* drain inbound buffers */
849 ctx = &qconf->inbound;
850 for (i = 0; i != ctx->nb_qps; i++) {
851 if (ctx->tbl[i].len != 0)
852 enqueue_cop_burst(ctx->tbl + i);
855 /* drain outbound buffers */
856 ctx = &qconf->outbound;
857 for (i = 0; i != ctx->nb_qps; i++) {
858 if (ctx->tbl[i].len != 0)
859 enqueue_cop_burst(ctx->tbl + i);
864 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
865 struct ipsec_ctx *ctx)
868 struct ipsec_traffic trf;
870 /* dequeue packets from crypto-queue */
871 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
872 RTE_DIM(trf.ipsec.pkts));
879 /* split traffic by ipv4-ipv6 */
880 split46_traffic(&trf, trf.ipsec.pkts, n);
882 /* process ipv4 packets */
883 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
884 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
886 /* process ipv6 packets */
887 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
888 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
892 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
893 struct ipsec_ctx *ctx)
896 struct ipsec_traffic trf;
898 /* dequeue packets from crypto-queue */
899 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
900 RTE_DIM(trf.ipsec.pkts));
907 /* split traffic by ipv4-ipv6 */
908 split46_traffic(&trf, trf.ipsec.pkts, n);
910 /* process ipv4 packets */
911 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
913 /* process ipv6 packets */
914 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
917 /* main processing loop */
919 main_loop(__attribute__((unused)) void *dummy)
921 struct rte_mbuf *pkts[MAX_PKT_BURST];
923 uint64_t prev_tsc, diff_tsc, cur_tsc;
927 struct lcore_conf *qconf;
929 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
930 / US_PER_S * BURST_TX_DRAIN_US;
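/* For example, with a 2 GHz TSC this is roughly 200,000 cycles, i.e. the TX
 * and crypto buffers are drained about every 100 us.
 */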
931 struct lcore_rx_queue *rxql;
934 lcore_id = rte_lcore_id();
935 qconf = &lcore_conf[lcore_id];
936 rxql = qconf->rx_queue_list;
937 socket_id = rte_lcore_to_socket_id(lcore_id);
939 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
940 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
941 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
942 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
943 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
944 qconf->inbound.cdev_map = cdev_map_in;
945 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
946 qconf->inbound.session_priv_pool =
947 socket_ctx[socket_id].session_priv_pool;
948 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
949 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
950 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
951 qconf->outbound.cdev_map = cdev_map_out;
952 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
953 qconf->outbound.session_priv_pool =
954 socket_ctx[socket_id].session_priv_pool;
956 if (qconf->nb_rx_queue == 0) {
957 RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
961 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
963 for (i = 0; i < qconf->nb_rx_queue; i++) {
964 portid = rxql[i].port_id;
965 queueid = rxql[i].queue_id;
967 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
968 lcore_id, portid, queueid);
972 cur_tsc = rte_rdtsc();
974 /* TX queue buffer drain */
975 diff_tsc = cur_tsc - prev_tsc;
977 if (unlikely(diff_tsc > drain_tsc)) {
978 drain_tx_buffers(qconf);
979 drain_crypto_buffers(qconf);
983 for (i = 0; i < qconf->nb_rx_queue; ++i) {
985 /* Read packets from RX queues */
986 portid = rxql[i].port_id;
987 queueid = rxql[i].queue_id;
988 nb_rx = rte_eth_rx_burst(portid, queueid,
989 pkts, MAX_PKT_BURST);
992 process_pkts(qconf, pkts, nb_rx, portid);
994 /* dequeue and process completed crypto-ops */
995 if (UNPROTECTED_PORT(portid))
996 drain_inbound_crypto_queues(qconf,
999 drain_outbound_crypto_queues(qconf,
1013 if (lcore_params == NULL) {
1014 printf("Error: No port/queue/core mappings\n");
1018 for (i = 0; i < nb_lcore_params; ++i) {
1019 lcore = lcore_params[i].lcore_id;
1020 if (!rte_lcore_is_enabled(lcore)) {
1021 printf("error: lcore %hhu is not enabled in "
1022 "lcore mask\n", lcore);
1025 socket_id = rte_lcore_to_socket_id(lcore);
1026 if (socket_id != 0 && numa_on == 0) {
1027 printf("warning: lcore %hhu is on socket %d "
1031 portid = lcore_params[i].port_id;
1032 if ((enabled_port_mask & (1 << portid)) == 0) {
1033 printf("port %u is not enabled in port mask\n", portid);
1036 if (!rte_eth_dev_is_valid_port(portid)) {
1037 printf("port %u is not present on the board\n", portid);
1045 get_port_nb_rx_queues(const uint16_t port)
1050 for (i = 0; i < nb_lcore_params; ++i) {
1051 if (lcore_params[i].port_id == port &&
1052 lcore_params[i].queue_id > queue)
1053 queue = lcore_params[i].queue_id;
1055 return (uint8_t)(++queue);
1059 init_lcore_rx_queues(void)
1061 uint16_t i, nb_rx_queue;
1064 for (i = 0; i < nb_lcore_params; ++i) {
1065 lcore = lcore_params[i].lcore_id;
1066 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1067 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1068 printf("error: too many queues (%u) for lcore: %u\n",
1069 nb_rx_queue + 1, lcore);
1072 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1073 lcore_params[i].port_id;
1074 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1075 lcore_params[i].queue_id;
1076 lcore_conf[lcore].nb_rx_queue++;
1083 print_usage(const char *prgname)
1085 fprintf(stderr, "%s [EAL options] --"
1091 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1092 " [--single-sa SAIDX]"
1093 " [--cryptodev_mask MASK]"
1094 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1095 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1097 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1098 " -P : Enable promiscuous mode\n"
1099 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1100 " -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
1102 " -f CONFIG_FILE: Configuration file\n"
1103 " --config (port,queue,lcore): Rx queue configuration\n"
1104 " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1105 " bypassing the SP\n"
1106 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1107 " devices to configure\n"
1108 " --" CMD_LINE_OPT_RX_OFFLOAD
1109 ": bitmask of the RX HW offload capabilities to enable/use\n"
1110 " (DEV_RX_OFFLOAD_*)\n"
1111 " --" CMD_LINE_OPT_TX_OFFLOAD
1112 ": bitmask of the TX HW offload capabilities to enable/use\n"
1113 " (DEV_TX_OFFLOAD_*)\n"
1119 parse_mask(const char *str, uint64_t *val)
1125 t = strtoul(str, &end, 0);
1126 if (errno != 0 || end[0] != 0)
1134 parse_portmask(const char *portmask)
1139 /* parse hexadecimal string */
1140 pm = strtoul(portmask, &end, 16);
1141 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1144 if ((pm == 0) && errno)
1151 parse_decimal(const char *str)
1156 num = strtoul(str, &end, 10);
1157 if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
1164 parse_config(const char *q_arg)
1167 const char *p, *p0 = q_arg;
1175 unsigned long int_fld[_NUM_FLD];
1176 char *str_fld[_NUM_FLD];
1180 nb_lcore_params = 0;
1182 while ((p = strchr(p0, '(')) != NULL) {
1184 p0 = strchr(p, ')');
1189 if (size >= sizeof(s))
1192 snprintf(s, sizeof(s), "%.*s", size, p);
1193 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1196 for (i = 0; i < _NUM_FLD; i++) {
1198 int_fld[i] = strtoul(str_fld[i], &end, 0);
1199 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1202 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1203 printf("exceeded max number of lcore params: %hu\n",
1207 lcore_params_array[nb_lcore_params].port_id =
1208 (uint8_t)int_fld[FLD_PORT];
1209 lcore_params_array[nb_lcore_params].queue_id =
1210 (uint8_t)int_fld[FLD_QUEUE];
1211 lcore_params_array[nb_lcore_params].lcore_id =
1212 (uint8_t)int_fld[FLD_LCORE];
1215 lcore_params = lcore_params_array;
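/* Example: --config="(0,0,1),(1,0,2)" binds port 0/queue 0 to lcore 1 and
 * port 1/queue 0 to lcore 2.
 */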
1220 parse_args(int32_t argc, char **argv)
1224 int32_t option_index;
1225 char *prgname = argv[0];
1226 int32_t f_present = 0;
1230 while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
1231 lgopts, &option_index)) != EOF) {
1235 enabled_port_mask = parse_portmask(optarg);
1236 if (enabled_port_mask == 0) {
1237 printf("invalid portmask\n");
1238 print_usage(prgname);
1243 printf("Promiscuous mode selected\n");
1247 unprotected_port_mask = parse_portmask(optarg);
1248 if (unprotected_port_mask == 0) {
1249 printf("invalid unprotected portmask\n");
1250 print_usage(prgname);
1255 if (f_present == 1) {
1256 printf("\"-f\" option present more than "
1258 print_usage(prgname);
1261 if (parse_cfg_file(optarg) < 0) {
1262 printf("parsing file \"%s\" failed\n",
1264 print_usage(prgname);
1271 int32_t size = parse_decimal(optarg);
1273 printf("Invalid jumbo frame size\n");
1275 print_usage(prgname);
1278 printf("Using default value 9000\n");
1284 printf("Enabled jumbo frames size %u\n", frame_size);
1286 case CMD_LINE_OPT_CONFIG_NUM:
1287 ret = parse_config(optarg);
1289 printf("Invalid config\n");
1290 print_usage(prgname);
1294 case CMD_LINE_OPT_SINGLE_SA_NUM:
1295 ret = parse_decimal(optarg);
1297 printf("Invalid argument[sa_idx]\n");
1298 print_usage(prgname);
1304 single_sa_idx = ret;
1305 printf("Configured with single SA index %u\n",
1308 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1309 ret = parse_portmask(optarg);
1311 printf("Invalid argument[portmask]\n");
1312 print_usage(prgname);
1317 enabled_cryptodev_mask = ret;
1319 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1320 ret = parse_mask(optarg, &dev_rx_offload);
1322 printf("Invalid argument for \'%s\': %s\n",
1323 CMD_LINE_OPT_RX_OFFLOAD, optarg);
1324 print_usage(prgname);
1328 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1329 ret = parse_mask(optarg, &dev_tx_offload);
1331 printf("Invalid argument for \'%s\': %s\n",
1332 CMD_LINE_OPT_TX_OFFLOAD, optarg);
1333 print_usage(prgname);
1338 print_usage(prgname);
1343 if (f_present == 0) {
1344 printf("Mandatory option \"-f\" not present\n");
1349 argv[optind-1] = prgname;
1352 optind = 1; /* reset getopt lib */
1357 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
1359 char buf[ETHER_ADDR_FMT_SIZE];
1360 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
1361 printf("%s%s", name, buf);
1365 * Update destination ethaddr for the port.
1368 add_dst_ethaddr(uint16_t port, const struct ether_addr *addr)
1370 if (port >= RTE_DIM(ethaddr_tbl))
1373 ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1377 /* Check link status of all ports for up to 9 s, then print the final status */
1379 check_all_ports_link_status(uint32_t port_mask)
1381 #define CHECK_INTERVAL 100 /* 100ms */
1382 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1384 uint8_t count, all_ports_up, print_flag = 0;
1385 struct rte_eth_link link;
1387 printf("\nChecking link status");
1389 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1391 RTE_ETH_FOREACH_DEV(portid) {
1392 if ((port_mask & (1 << portid)) == 0)
1394 memset(&link, 0, sizeof(link));
1395 rte_eth_link_get_nowait(portid, &link);
1396 /* print link status if flag set */
1397 if (print_flag == 1) {
1398 if (link.link_status)
1400 "Port%d Link Up - speed %u Mbps -%s\n",
1401 portid, link.link_speed,
1402 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1403 ("full-duplex") : ("half-duplex\n"));
1405 printf("Port %d Link Down\n", portid);
1408 /* clear all_ports_up flag if any link down */
1409 if (link.link_status == ETH_LINK_DOWN) {
1414 /* after finally printing all link status, get out */
1415 if (print_flag == 1)
1418 if (all_ports_up == 0) {
1421 rte_delay_ms(CHECK_INTERVAL);
1424 /* set the print_flag if all ports up or timeout */
1425 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1433 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1434 uint16_t qp, struct lcore_params *params,
1435 struct ipsec_ctx *ipsec_ctx,
1436 const struct rte_cryptodev_capabilities *cipher,
1437 const struct rte_cryptodev_capabilities *auth,
1438 const struct rte_cryptodev_capabilities *aead)
1442 struct cdev_key key = { 0 };
1444 key.lcore_id = params->lcore_id;
1446 key.cipher_algo = cipher->sym.cipher.algo;
1448 key.auth_algo = auth->sym.auth.algo;
1450 key.aead_algo = aead->sym.aead.algo;
1452 ret = rte_hash_lookup(map, &key);
1456 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1457 if (ipsec_ctx->tbl[i].id == cdev_id)
1460 if (i == ipsec_ctx->nb_qps) {
1461 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1462 printf("Maximum number of crypto devices assigned to "
1463 "a core, increase MAX_QP_PER_LCORE value\n");
1466 ipsec_ctx->tbl[i].id = cdev_id;
1467 ipsec_ctx->tbl[i].qp = qp;
1468 ipsec_ctx->nb_qps++;
1469 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1470 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1474 ret = rte_hash_add_key_data(map, &key, (void *)i);
1476 printf("Faled to insert cdev mapping for (lcore %u, "
1477 "cdev %u, qp %u), errno %d\n",
1478 key.lcore_id, ipsec_ctx->tbl[i].id,
1479 ipsec_ctx->tbl[i].qp, ret);
1487 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1488 uint16_t qp, struct lcore_params *params)
1491 const struct rte_cryptodev_capabilities *i, *j;
1492 struct rte_hash *map;
1493 struct lcore_conf *qconf;
1494 struct ipsec_ctx *ipsec_ctx;
1497 qconf = &lcore_conf[params->lcore_id];
1499 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1501 ipsec_ctx = &qconf->outbound;
1505 ipsec_ctx = &qconf->inbound;
1509 /* Only use cryptodevs that support symmetric operation chaining */
1510 if (!(dev_info->feature_flags &
1511 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1514 for (i = dev_info->capabilities;
1515 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1516 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1519 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1520 ret |= add_mapping(map, str, cdev_id, qp, params,
1521 ipsec_ctx, NULL, NULL, i);
1525 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1528 for (j = dev_info->capabilities;
1529 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1530 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1533 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1536 ret |= add_mapping(map, str, cdev_id, qp, params,
1537 ipsec_ctx, i, j, NULL);
1544 /* Check if the device is enabled by cryptodev_mask */
1546 check_cryptodev_mask(uint8_t cdev_id)
1548 if (enabled_cryptodev_mask & (1 << cdev_id))
1555 cryptodevs_init(void)
1557 struct rte_cryptodev_config dev_conf;
1558 struct rte_cryptodev_qp_conf qp_conf;
1559 uint16_t idx, max_nb_qps, qp, i;
1560 int16_t cdev_id, port_id;
1561 struct rte_hash_parameters params = { 0 };
1563 params.entries = CDEV_MAP_ENTRIES;
1564 params.key_len = sizeof(struct cdev_key);
1565 params.hash_func = rte_jhash;
1566 params.hash_func_init_val = 0;
1567 params.socket_id = rte_socket_id();
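/* The in/out hash maps translate a (lcore id, cipher/auth/aead algorithm) key
 * into the per-lcore cryptodev/queue-pair entry that will handle it; see
 * add_mapping().
 */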
1569 params.name = "cdev_map_in";
1570 cdev_map_in = rte_hash_create(&params);
1571 if (cdev_map_in == NULL)
1572 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1575 params.name = "cdev_map_out";
1576 cdev_map_out = rte_hash_create(&params);
1577 if (cdev_map_out == NULL)
1578 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1581 printf("lcore/cryptodev/qp mappings:\n");
1583 uint32_t max_sess_sz = 0, sess_sz;
1584 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1587 /* Get crypto priv session size */
1588 sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
1589 if (sess_sz > max_sess_sz)
1590 max_sess_sz = sess_sz;
1593 * If crypto device is security capable, need to check the
1594 * size of security session as well.
1597 /* Get security context of the crypto device */
1598 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
1599 if (sec_ctx == NULL)
1602 /* Get size of security session */
1603 sess_sz = rte_security_session_get_size(sec_ctx);
1604 if (sess_sz > max_sess_sz)
1605 max_sess_sz = sess_sz;
1607 RTE_ETH_FOREACH_DEV(port_id) {
1610 if ((enabled_port_mask & (1 << port_id)) == 0)
1613 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
1614 if (sec_ctx == NULL)
1617 sess_sz = rte_security_session_get_size(sec_ctx);
1618 if (sess_sz > max_sess_sz)
1619 max_sess_sz = sess_sz;
1623 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1624 struct rte_cryptodev_info cdev_info;
1626 if (check_cryptodev_mask((uint8_t)cdev_id))
1629 rte_cryptodev_info_get(cdev_id, &cdev_info);
1631 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1632 max_nb_qps = cdev_info.max_nb_queue_pairs;
1634 max_nb_qps = nb_lcore_params;
1638 while (qp < max_nb_qps && i < nb_lcore_params) {
1639 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1640 &lcore_params[idx]))
1643 idx = idx % nb_lcore_params;
1650 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1651 dev_conf.nb_queue_pairs = qp;
1653 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1654 if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
1655 rte_exit(EXIT_FAILURE,
1656 "Device does not support at least %u "
1657 "sessions", CDEV_MP_NB_OBJS);
1659 if (!socket_ctx[dev_conf.socket_id].session_pool) {
1660 char mp_name[RTE_MEMPOOL_NAMESIZE];
1661 struct rte_mempool *sess_mp;
1663 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1664 "sess_mp_%u", dev_conf.socket_id);
1665 sess_mp = rte_cryptodev_sym_session_pool_create(
1666 mp_name, CDEV_MP_NB_OBJS,
1667 0, CDEV_MP_CACHE_SZ, 0,
1668 dev_conf.socket_id);
1669 socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
1672 if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
1673 char mp_name[RTE_MEMPOOL_NAMESIZE];
1674 struct rte_mempool *sess_mp;
1676 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1677 "sess_mp_priv_%u", dev_conf.socket_id);
1678 sess_mp = rte_mempool_create(mp_name,
1682 0, NULL, NULL, NULL,
1683 NULL, dev_conf.socket_id,
1685 socket_ctx[dev_conf.socket_id].session_priv_pool =
1689 if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
1690 !socket_ctx[dev_conf.socket_id].session_pool)
1691 rte_exit(EXIT_FAILURE,
1692 "Cannot create session pool on socket %d\n",
1693 dev_conf.socket_id);
1695 printf("Allocated session pool on socket %d\n",
1696 dev_conf.socket_id);
1698 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1699 rte_panic("Failed to initialize cryptodev %u\n",
1702 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1703 qp_conf.mp_session =
1704 socket_ctx[dev_conf.socket_id].session_pool;
1705 qp_conf.mp_session_private =
1706 socket_ctx[dev_conf.socket_id].session_priv_pool;
1707 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1708 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1709 &qp_conf, dev_conf.socket_id))
1710 rte_panic("Failed to setup queue %u for "
1711 "cdev_id %u\n", 0, cdev_id);
1713 if (rte_cryptodev_start(cdev_id))
1714 rte_panic("Failed to start cryptodev %u\n",
1718 /* create session pools for eth devices that implement security */
1719 RTE_ETH_FOREACH_DEV(port_id) {
1720 if ((enabled_port_mask & (1 << port_id)) &&
1721 rte_eth_dev_get_sec_ctx(port_id)) {
1722 int socket_id = rte_eth_dev_socket_id(port_id);
1724 if (!socket_ctx[socket_id].session_pool) {
1725 char mp_name[RTE_MEMPOOL_NAMESIZE];
1726 struct rte_mempool *sess_mp;
1728 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1729 "sess_mp_%u", socket_id);
1730 sess_mp = rte_mempool_create(mp_name,
1731 (CDEV_MP_NB_OBJS * 2),
1734 0, NULL, NULL, NULL,
1737 if (sess_mp == NULL)
1738 rte_exit(EXIT_FAILURE,
1739 "Cannot create session pool "
1740 "on socket %d\n", socket_id);
1742 printf("Allocated session pool "
1743 "on socket %d\n", socket_id);
1744 socket_ctx[socket_id].session_pool = sess_mp;
1756 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
1758 struct rte_eth_dev_info dev_info;
1759 struct rte_eth_txconf *txconf;
1760 uint16_t nb_tx_queue, nb_rx_queue;
1761 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1762 int32_t ret, socket_id;
1763 struct lcore_conf *qconf;
1764 struct ether_addr ethaddr;
1765 struct rte_eth_conf local_port_conf = port_conf;
1767 rte_eth_dev_info_get(portid, &dev_info);
1769 /* limit allowed HW offloads, as requested by the user */
1770 dev_info.rx_offload_capa &= dev_rx_offload;
1771 dev_info.tx_offload_capa &= dev_tx_offload;
1773 printf("Configuring device port %u:\n", portid);
1775 rte_eth_macaddr_get(portid, &ethaddr);
1776 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
1777 print_ethaddr("Address: ", &ethaddr);
1780 nb_rx_queue = get_port_nb_rx_queues(portid);
1781 nb_tx_queue = nb_lcores;
1783 if (nb_rx_queue > dev_info.max_rx_queues)
1784 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1785 "(max rx queue is %u)\n",
1786 nb_rx_queue, dev_info.max_rx_queues);
1788 if (nb_tx_queue > dev_info.max_tx_queues)
1789 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1790 "(max tx queue is %u)\n",
1791 nb_tx_queue, dev_info.max_tx_queues);
1793 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1794 nb_rx_queue, nb_tx_queue);
1797 local_port_conf.rxmode.max_rx_pkt_len = frame_size;
1798 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1801 local_port_conf.rxmode.offloads |= req_rx_offloads;
1802 local_port_conf.txmode.offloads |= req_tx_offloads;
1804 /* Check that all required capabilities are supported */
1805 if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1806 local_port_conf.rxmode.offloads)
1807 rte_exit(EXIT_FAILURE,
1808 "Error: port %u required RX offloads: 0x%" PRIx64
1809 ", avaialbe RX offloads: 0x%" PRIx64 "\n",
1810 portid, local_port_conf.rxmode.offloads,
1811 dev_info.rx_offload_capa);
1813 if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1814 local_port_conf.txmode.offloads)
1815 rte_exit(EXIT_FAILURE,
1816 "Error: port %u required TX offloads: 0x%" PRIx64
1817 ", avaialbe TX offloads: 0x%" PRIx64 "\n",
1818 portid, local_port_conf.txmode.offloads,
1819 dev_info.tx_offload_capa);
1821 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1822 local_port_conf.txmode.offloads |=
1823 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1825 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
1826 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
1828 printf("port %u configurng rx_offloads=0x%" PRIx64
1829 ", tx_offloads=0x%" PRIx64 "\n",
1830 portid, local_port_conf.rxmode.offloads,
1831 local_port_conf.txmode.offloads);
1833 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1834 dev_info.flow_type_rss_offloads;
1835 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1836 port_conf.rx_adv_conf.rss_conf.rss_hf) {
1837 printf("Port %u modified RSS hash function based on hardware support,"
1838 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
1840 port_conf.rx_adv_conf.rss_conf.rss_hf,
1841 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1844 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1847 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1848 "err=%d, port=%d\n", ret, portid);
1850 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
1852 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
1853 "err=%d, port=%d\n", ret, portid);
1855 /* init one TX queue per lcore */
1857 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1858 if (rte_lcore_is_enabled(lcore_id) == 0)
1862 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1867 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1869 txconf = &dev_info.default_txconf;
1870 txconf->offloads = local_port_conf.txmode.offloads;
1872 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
1875 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1876 "err=%d, port=%d\n", ret, portid);
1878 qconf = &lcore_conf[lcore_id];
1879 qconf->tx_queue_id[portid] = tx_queueid;
1881 /* Pre-populate pkt offloads based on capabilities */
1882 qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
1883 qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
1884 if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
1885 qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
1889 /* init RX queues */
1890 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
1891 struct rte_eth_rxconf rxq_conf;
1893 if (portid != qconf->rx_queue_list[queue].port_id)
1896 rx_queueid = qconf->rx_queue_list[queue].queue_id;
1898 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
1901 rxq_conf = dev_info.default_rxconf;
1902 rxq_conf.offloads = local_port_conf.rxmode.offloads;
1903 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
1904 nb_rxd, socket_id, &rxq_conf,
1905 socket_ctx[socket_id].mbuf_pool);
1907 rte_exit(EXIT_FAILURE,
1908 "rte_eth_rx_queue_setup: err=%d, "
1909 "port=%d\n", ret, portid);
1916 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
1919 uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
1920 RTE_MBUF_DEFAULT_BUF_SIZE;
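/* When jumbo frames are enabled the mbuf data room must cover the configured
 * frame size plus headroom; otherwise the default buffer size is used.
 */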
1923 snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
1924 ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
1925 MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
1928 if (ctx->mbuf_pool == NULL)
1929 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
1932 printf("Allocated mbuf pool on socket %d\n", socket_id);
1936 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
1938 struct ipsec_sa *sa;
1940 /* For inline protocol processing, the metadata in the event will
1941 * uniquely identify the security session which raised the event.
1942 * Application would then need the userdata it had registered with the
1943 * security session to process the event.
1946 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
1949 /* userdata could not be retrieved */
1953 /* Sequence number overflow. The SA needs to be re-established */
1959 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
1960 void *param, void *ret_param)
1963 struct rte_eth_event_ipsec_desc *event_desc = NULL;
1964 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
1965 rte_eth_dev_get_sec_ctx(port_id);
1967 RTE_SET_USED(param);
1969 if (type != RTE_ETH_EVENT_IPSEC)
1972 event_desc = ret_param;
1973 if (event_desc == NULL) {
1974 printf("Event descriptor not set\n");
1978 md = event_desc->metadata;
1980 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
1981 return inline_ipsec_event_esn_overflow(ctx, md);
1982 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
1983 printf("Invalid IPsec event reported\n");
1991 main(int32_t argc, char **argv)
1997 uint64_t req_rx_offloads, req_tx_offloads;
2000 ret = rte_eal_init(argc, argv);
2002 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2006 /* parse application arguments (after the EAL ones) */
2007 ret = parse_args(argc, argv);
2009 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2011 if ((unprotected_port_mask & enabled_port_mask) !=
2012 unprotected_port_mask)
2013 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2014 unprotected_port_mask);
2016 if (check_params() < 0)
2017 rte_exit(EXIT_FAILURE, "check_params failed\n");
2019 ret = init_lcore_rx_queues();
2021 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2023 nb_lcores = rte_lcore_count();
2025 /* Replicate each context per socket */
2026 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2027 if (rte_lcore_is_enabled(lcore_id) == 0)
2031 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2035 if (socket_ctx[socket_id].mbuf_pool)
2038 sa_init(&socket_ctx[socket_id], socket_id);
2040 sp4_init(&socket_ctx[socket_id], socket_id);
2042 sp6_init(&socket_ctx[socket_id], socket_id);
2044 rt_init(&socket_ctx[socket_id], socket_id);
2046 pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
2049 RTE_ETH_FOREACH_DEV(portid) {
2050 if ((enabled_port_mask & (1 << portid)) == 0)
2053 sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
2054 port_init(portid, req_rx_offloads, req_tx_offloads);
2060 RTE_ETH_FOREACH_DEV(portid) {
2061 if ((enabled_port_mask & (1 << portid)) == 0)
2065 ret = rte_eth_dev_start(portid);
2067 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
2068 "err=%d, port=%d\n", ret, portid);
2070 * If enabled, put device in promiscuous mode.
2071 * This allows IO forwarding mode to forward packets
2072 * to itself through 2 cross-connected ports of the
2076 rte_eth_promiscuous_enable(portid);
2078 rte_eth_dev_callback_register(portid,
2079 RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
2082 check_all_ports_link_status(enabled_port_mask);
2084 /* launch the per-lcore main loop on every lcore */
2085 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2086 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2087 if (rte_eal_wait_lcore(lcore_id) < 0)