1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <sys/types.h>
11 #include <netinet/in.h>
12 #include <netinet/ip.h>
13 #include <netinet/ip6.h>
15 #include <sys/queue.h>
21 #include <rte_common.h>
22 #include <rte_bitmap.h>
23 #include <rte_byteorder.h>
26 #include <rte_launch.h>
27 #include <rte_atomic.h>
28 #include <rte_cycles.h>
29 #include <rte_prefetch.h>
30 #include <rte_lcore.h>
31 #include <rte_per_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_interrupts.h>
34 #include <rte_random.h>
35 #include <rte_debug.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_mempool.h>
44 #include <rte_jhash.h>
45 #include <rte_cryptodev.h>
46 #include <rte_security.h>
47 #include <rte_eventdev.h>
49 #include <rte_ip_frag.h>
51 #include "event_helper.h"
56 volatile bool force_quit;
58 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
60 #define MAX_JUMBO_PKT_LEN 9600
62 #define MEMPOOL_CACHE_SIZE 256
64 #define NB_MBUF (32000)
66 #define CDEV_QUEUE_DESC 2048
67 #define CDEV_MAP_ENTRIES 16384
68 #define CDEV_MP_NB_OBJS 1024
69 #define CDEV_MP_CACHE_SZ 64
70 #define MAX_QUEUE_PAIRS 1
72 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
76 /* Configure how many packets ahead to prefetch, when reading packets */
77 #define PREFETCH_OFFSET 3
79 #define MAX_RX_QUEUE_PER_LCORE 16
81 #define MAX_LCORE_PARAMS 1024
83 #define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
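/*
 * For illustration: with "-u 0x1" (a hypothetical mask), UNPROTECTED_PORT(0)
 * is non-zero while UNPROTECTED_PORT(1) is zero, i.e. port 0 takes the
 * inbound (decrypt) path in process_pkts() and the other enabled ports take
 * the outbound (encrypt) path.
 */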
86 * Configurable number of RX/TX ring descriptors
88 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
89 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
90 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
91 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
93 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
94 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
95 (((uint64_t)((a) & 0xff) << 56) | \
96 ((uint64_t)((b) & 0xff) << 48) | \
97 ((uint64_t)((c) & 0xff) << 40) | \
98 ((uint64_t)((d) & 0xff) << 32) | \
99 ((uint64_t)((e) & 0xff) << 24) | \
100 ((uint64_t)((f) & 0xff) << 16) | \
101 ((uint64_t)((g) & 0xff) << 8) | \
102 ((uint64_t)(h) & 0xff))
104 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
105 (((uint64_t)((h) & 0xff) << 56) | \
106 ((uint64_t)((g) & 0xff) << 48) | \
107 ((uint64_t)((f) & 0xff) << 40) | \
108 ((uint64_t)((e) & 0xff) << 32) | \
109 ((uint64_t)((d) & 0xff) << 24) | \
110 ((uint64_t)((c) & 0xff) << 16) | \
111 ((uint64_t)((b) & 0xff) << 8) | \
112 ((uint64_t)(a) & 0xff))
114 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
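/*
 * For example, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) packs the six MAC
 * bytes (plus two zero pad bytes) into a uint64_t whose in-memory layout, on
 * either byte order, follows the on-wire order, so its first six bytes can be
 * memcpy'd straight into the Ethernet header (as prepare_tx_pkt() does).
 */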
116 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
117 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
118 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
119 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
122 #define FRAG_TBL_BUCKET_ENTRIES 4
123 #define MAX_FRAG_TTL_NS (10LL * NS_PER_S)
125 #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
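/* For example, MTU_TO_FRAMELEN(1500) = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518. */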
127 /* port/source ethernet addr and destination ethernet addr */
128 struct ethaddr_info {
132 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
133 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
134 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
135 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
136 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
139 struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
141 #define CMD_LINE_OPT_CONFIG "config"
142 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
143 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
144 #define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode"
145 #define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type"
146 #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
147 #define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
148 #define CMD_LINE_OPT_REASSEMBLE "reassemble"
149 #define CMD_LINE_OPT_MTU "mtu"
150 #define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
152 #define CMD_LINE_ARG_EVENT "event"
153 #define CMD_LINE_ARG_POLL "poll"
154 #define CMD_LINE_ARG_ORDERED "ordered"
155 #define CMD_LINE_ARG_ATOMIC "atomic"
156 #define CMD_LINE_ARG_PARALLEL "parallel"
159 /* long options mapped to a short option */
161 /* first long-only option value must be >= 256, so that we won't
162 * conflict with short options
164 CMD_LINE_OPT_MIN_NUM = 256,
165 CMD_LINE_OPT_CONFIG_NUM,
166 CMD_LINE_OPT_SINGLE_SA_NUM,
167 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
168 CMD_LINE_OPT_TRANSFER_MODE_NUM,
169 CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
170 CMD_LINE_OPT_RX_OFFLOAD_NUM,
171 CMD_LINE_OPT_TX_OFFLOAD_NUM,
172 CMD_LINE_OPT_REASSEMBLE_NUM,
173 CMD_LINE_OPT_MTU_NUM,
174 CMD_LINE_OPT_FRAG_TTL_NUM,
177 static const struct option lgopts[] = {
178 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
179 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
180 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
181 {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
182 {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
183 {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
184 {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
185 {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
186 {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
187 {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
191 /* mask of enabled ports */
192 static uint32_t enabled_port_mask;
193 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
194 static uint32_t unprotected_port_mask;
195 static int32_t promiscuous_on = 1;
196 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
197 static uint32_t nb_lcores;
198 static uint32_t single_sa;
199 static uint32_t single_sa_idx;
202 * RX/TX HW offload capabilities to enable/use on ethernet ports.
203 * By default all capabilities are enabled.
205 static uint64_t dev_rx_offload = UINT64_MAX;
206 static uint64_t dev_tx_offload = UINT64_MAX;
209 * global values that determine multi-seg policy
211 static uint32_t frag_tbl_sz;
212 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
213 static uint32_t mtu_size = RTE_ETHER_MTU;
214 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
216 /* application wide librte_ipsec/SA parameters */
217 struct app_sa_prm app_sa_prm = {
219 .cache_sz = SA_CACHE_SZ
221 static const char *cfgfile;
223 struct lcore_rx_queue {
226 } __rte_cache_aligned;
228 struct lcore_params {
232 } __rte_cache_aligned;
234 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
236 static struct lcore_params *lcore_params;
237 static uint16_t nb_lcore_params;
239 static struct rte_hash *cdev_map_in;
240 static struct rte_hash *cdev_map_out;
244 struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
248 uint16_t nb_rx_queue;
249 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
250 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
251 struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
252 struct ipsec_ctx inbound;
253 struct ipsec_ctx outbound;
254 struct rt_ctx *rt4_ctx;
255 struct rt_ctx *rt6_ctx;
257 struct rte_ip_frag_tbl *tbl;
258 struct rte_mempool *pool_dir;
259 struct rte_mempool *pool_indir;
260 struct rte_ip_frag_death_row dr;
262 } __rte_cache_aligned;
264 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
266 static struct rte_eth_conf port_conf = {
268 .mq_mode = ETH_MQ_RX_RSS,
269 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
271 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
276 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
277 ETH_RSS_TCP | ETH_RSS_SCTP,
281 .mq_mode = ETH_MQ_TX_NONE,
285 static struct socket_ctx socket_ctx[NB_SOCKETS];
288 * Determine if multi-segment support is required:
289 * - either the frame buffer size is smaller than the MTU
290 * - or reassembly support is requested
293 multi_seg_required(void)
295 return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
296 frame_buf_size || frag_tbl_sz != 0);
300 adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
305 plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
306 if (plen < m->pkt_len) {
307 trim = m->pkt_len - plen;
308 rte_pktmbuf_trim(m, trim);
313 adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
318 plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
319 if (plen < m->pkt_len) {
320 trim = m->pkt_len - plen;
321 rte_pktmbuf_trim(m, trim);
326 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
328 const struct rte_ether_hdr *eth;
329 const struct rte_ipv4_hdr *iph4;
330 const struct rte_ipv6_hdr *iph6;
332 eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
333 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
335 iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
337 adjust_ipv4_pktlen(pkt, iph4, 0);
339 if (iph4->next_proto_id == IPPROTO_ESP)
340 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
342 t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
343 t->ip4.pkts[(t->ip4.num)++] = pkt;
346 pkt->l3_len = sizeof(*iph4);
347 pkt->packet_type |= RTE_PTYPE_L3_IPV4;
348 } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
350 size_t l3len, ext_len;
353 /* get protocol type */
354 iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
356 adjust_ipv6_pktlen(pkt, iph6, 0);
358 next_proto = iph6->proto;
360 /* determine l3 header size up to ESP extension */
361 l3len = sizeof(struct ip6_hdr);
362 p = rte_pktmbuf_mtod(pkt, uint8_t *);
363 while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
364 (next_proto = rte_ipv6_get_next_ext(p + l3len,
365 next_proto, &ext_len)) >= 0)
368 /* drop packet when IPv6 header exceeds first segment length */
369 if (unlikely(l3len > pkt->data_len)) {
370 rte_pktmbuf_free(pkt);
374 if (next_proto == IPPROTO_ESP)
375 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
377 t->ip6.data[t->ip6.num] = &iph6->proto;
378 t->ip6.pkts[(t->ip6.num)++] = pkt;
382 pkt->packet_type |= RTE_PTYPE_L3_IPV6;
384 /* Unknown/Unsupported type, drop the packet */
385 RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
386 rte_be_to_cpu_16(eth->ether_type));
387 rte_pktmbuf_free(pkt);
391 /* Check if the packet has been processed inline. For inline protocol
392 * processed packets, the metadata in the mbuf can be used to identify
393 * the security processing done on the packet. The metadata will be
394 * used to retrieve the application registered userdata associated
395 * with the security session.
398 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
400 struct ipsec_mbuf_metadata *priv;
401 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
402 rte_eth_dev_get_sec_ctx(
405 /* Retrieve the userdata registered. Here, the userdata
406 * registered is the SA pointer.
409 sa = (struct ipsec_sa *)
410 rte_security_get_userdata(ctx, pkt->udata64);
413 /* userdata could not be retrieved */
417 /* Save SA as priv member in mbuf. This will be used in the
418 * IPsec selector(SP-SA) check.
421 priv = get_priv(pkt);
427 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
436 for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
437 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
439 prepare_one_packet(pkts[i], t);
441 /* Process left packets */
442 for (; i < nb_pkts; i++)
443 prepare_one_packet(pkts[i], t);
447 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
448 const struct lcore_conf *qconf)
451 struct rte_ether_hdr *ethhdr;
453 ip = rte_pktmbuf_mtod(pkt, struct ip *);
455 ethhdr = (struct rte_ether_hdr *)
456 rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
458 if (ip->ip_v == IPVERSION) {
459 pkt->ol_flags |= qconf->outbound.ipv4_offloads;
460 pkt->l3_len = sizeof(struct ip);
461 pkt->l2_len = RTE_ETHER_HDR_LEN;
465 /* calculate IPv4 cksum in SW */
466 if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
467 ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
469 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
471 pkt->ol_flags |= qconf->outbound.ipv6_offloads;
472 pkt->l3_len = sizeof(struct ip6_hdr);
473 pkt->l2_len = RTE_ETHER_HDR_LEN;
475 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
478 memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
479 sizeof(struct rte_ether_addr));
480 memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
481 sizeof(struct rte_ether_addr));
485 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
486 const struct lcore_conf *qconf)
489 const int32_t prefetch_offset = 2;
491 for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
492 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
493 prepare_tx_pkt(pkts[i], port, qconf);
495 /* Process left packets */
496 for (; i < nb_pkts; i++)
497 prepare_tx_pkt(pkts[i], port, qconf);
500 /* Send burst of packets on an output interface */
501 static inline int32_t
502 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
504 struct rte_mbuf **m_table;
508 queueid = qconf->tx_queue_id[port];
509 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
511 prepare_tx_burst(m_table, n, port, qconf);
513 ret = rte_eth_tx_burst(port, queueid, m_table, n);
514 if (unlikely(ret < n)) {
516 rte_pktmbuf_free(m_table[ret]);
524 * Helper function to fragment and queue for TX one packet.
526 static inline uint32_t
527 send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
528 uint16_t port, uint8_t proto)
534 tbl = qconf->tx_mbufs + port;
537 /* free space for new fragments */
538 if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
539 send_burst(qconf, len, port);
543 n = RTE_DIM(tbl->m_table) - len;
545 if (proto == IPPROTO_IP)
546 rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
547 n, mtu_size, qconf->frag.pool_dir,
548 qconf->frag.pool_indir);
550 rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
551 n, mtu_size, qconf->frag.pool_dir,
552 qconf->frag.pool_indir);
558 "%s: failed to fragment packet with size %u, "
560 __func__, m->pkt_len, rte_errno);
566 /* Enqueue a single packet, and send burst if queue is filled */
567 static inline int32_t
568 send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
572 struct lcore_conf *qconf;
574 lcore_id = rte_lcore_id();
576 qconf = &lcore_conf[lcore_id];
577 len = qconf->tx_mbufs[port].len;
579 if (m->pkt_len <= mtu_size) {
580 qconf->tx_mbufs[port].m_table[len] = m;
583 /* need to fragment the packet */
584 } else if (frag_tbl_sz > 0)
585 len = send_fragment_packet(qconf, m, port, proto);
589 /* enough pkts to be sent */
590 if (unlikely(len == MAX_PKT_BURST)) {
591 send_burst(qconf, MAX_PKT_BURST, port);
595 qconf->tx_mbufs[port].len = len;
600 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
604 uint32_t i, j, res, sa_idx;
606 if (ip->num == 0 || sp == NULL)
609 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
610 ip->num, DEFAULT_MAX_CATEGORIES);
613 for (i = 0; i < ip->num; i++) {
620 if (res == DISCARD) {
625 /* Only check SPI match for processed IPSec packets */
626 if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
632 if (!inbound_sa_check(sa, m, sa_idx)) {
642 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
651 for (i = 0; i < num; i++) {
654 ip = rte_pktmbuf_mtod(m, struct ip *);
656 if (ip->ip_v == IPVERSION) {
657 trf->ip4.pkts[n4] = m;
658 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
659 uint8_t *, offsetof(struct ip, ip_p));
661 } else if (ip->ip_v == IP6_VERSION) {
662 trf->ip6.pkts[n6] = m;
663 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
665 offsetof(struct ip6_hdr, ip6_nxt));
677 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
678 struct ipsec_traffic *traffic)
680 uint16_t nb_pkts_in, n_ip4, n_ip6;
682 n_ip4 = traffic->ip4.num;
683 n_ip6 = traffic->ip6.num;
685 if (app_sa_prm.enable == 0) {
686 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
687 traffic->ipsec.num, MAX_PKT_BURST);
688 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
690 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
691 traffic->ipsec.saptr, traffic->ipsec.num);
692 ipsec_process(ipsec_ctx, traffic);
695 inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
698 inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
703 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
704 struct traffic_type *ipsec)
707 uint32_t i, j, sa_idx;
709 if (ip->num == 0 || sp == NULL)
712 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
713 ip->num, DEFAULT_MAX_CATEGORIES);
716 for (i = 0; i < ip->num; i++) {
718 sa_idx = ip->res[i] - 1;
719 if (ip->res[i] == DISCARD)
721 else if (ip->res[i] == BYPASS)
724 ipsec->res[ipsec->num] = sa_idx;
725 ipsec->pkts[ipsec->num++] = m;
732 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
733 struct ipsec_traffic *traffic)
736 uint16_t idx, nb_pkts_out, i;
738 /* Drop any IPsec traffic from protected ports */
739 for (i = 0; i < traffic->ipsec.num; i++)
740 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
742 traffic->ipsec.num = 0;
744 outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
746 outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
748 if (app_sa_prm.enable == 0) {
750 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
751 traffic->ipsec.res, traffic->ipsec.num,
754 for (i = 0; i < nb_pkts_out; i++) {
755 m = traffic->ipsec.pkts[i];
756 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
757 if (ip->ip_v == IPVERSION) {
758 idx = traffic->ip4.num++;
759 traffic->ip4.pkts[idx] = m;
761 idx = traffic->ip6.num++;
762 traffic->ip6.pkts[idx] = m;
766 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
767 traffic->ipsec.saptr, traffic->ipsec.num);
768 ipsec_process(ipsec_ctx, traffic);
773 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
774 struct ipsec_traffic *traffic)
777 uint32_t nb_pkts_in, i, idx;
779 /* Drop any IPv4 traffic from unprotected ports */
780 for (i = 0; i < traffic->ip4.num; i++)
781 rte_pktmbuf_free(traffic->ip4.pkts[i]);
783 traffic->ip4.num = 0;
785 /* Drop any IPv6 traffic from unprotected ports */
786 for (i = 0; i < traffic->ip6.num; i++)
787 rte_pktmbuf_free(traffic->ip6.pkts[i]);
789 traffic->ip6.num = 0;
791 if (app_sa_prm.enable == 0) {
793 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
794 traffic->ipsec.num, MAX_PKT_BURST);
796 for (i = 0; i < nb_pkts_in; i++) {
797 m = traffic->ipsec.pkts[i];
798 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
799 if (ip->ip_v == IPVERSION) {
800 idx = traffic->ip4.num++;
801 traffic->ip4.pkts[idx] = m;
803 idx = traffic->ip6.num++;
804 traffic->ip6.pkts[idx] = m;
808 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
809 traffic->ipsec.saptr, traffic->ipsec.num);
810 ipsec_process(ipsec_ctx, traffic);
815 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
816 struct ipsec_traffic *traffic)
819 uint32_t nb_pkts_out, i, n;
822 /* Drop any IPsec traffic from protected ports */
823 for (i = 0; i < traffic->ipsec.num; i++)
824 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
828 for (i = 0; i < traffic->ip4.num; i++) {
829 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
830 traffic->ipsec.res[n++] = single_sa_idx;
833 for (i = 0; i < traffic->ip6.num; i++) {
834 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
835 traffic->ipsec.res[n++] = single_sa_idx;
838 traffic->ip4.num = 0;
839 traffic->ip6.num = 0;
840 traffic->ipsec.num = n;
842 if (app_sa_prm.enable == 0) {
844 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
845 traffic->ipsec.res, traffic->ipsec.num,
848 /* They all use the same SA (ip4 or ip6 tunnel) */
849 m = traffic->ipsec.pkts[0];
850 ip = rte_pktmbuf_mtod(m, struct ip *);
851 if (ip->ip_v == IPVERSION) {
852 traffic->ip4.num = nb_pkts_out;
853 for (i = 0; i < nb_pkts_out; i++)
854 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
856 traffic->ip6.num = nb_pkts_out;
857 for (i = 0; i < nb_pkts_out; i++)
858 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
861 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
862 traffic->ipsec.saptr, traffic->ipsec.num);
863 ipsec_process(ipsec_ctx, traffic);
867 static inline int32_t
868 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
870 struct ipsec_mbuf_metadata *priv;
873 priv = get_priv(pkt);
876 if (unlikely(sa == NULL)) {
877 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
885 return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
896 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
898 uint32_t hop[MAX_PKT_BURST * 2];
899 uint32_t dst_ip[MAX_PKT_BURST * 2];
902 uint16_t lpm_pkts = 0;
907 /* Need to do an LPM lookup for non-inline packets. Inline packets will
908 * have port ID in the SA
911 for (i = 0; i < nb_pkts; i++) {
912 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
913 /* Security offload not enabled. So an LPM lookup is
914 * required to get the hop
916 offset = offsetof(struct ip, ip_dst);
917 dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
919 dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
924 rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
928 for (i = 0; i < nb_pkts; i++) {
929 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
930 /* Read hop from the SA */
931 pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
933 /* Need to use hop returned by lookup */
934 pkt_hop = hop[lpm_pkts++];
937 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
938 rte_pktmbuf_free(pkts[i]);
941 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
946 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
948 int32_t hop[MAX_PKT_BURST * 2];
949 uint8_t dst_ip[MAX_PKT_BURST * 2][16];
953 uint16_t lpm_pkts = 0;
958 /* Need to do an LPM lookup for non-inline packets. Inline packets will
959 * have port ID in the SA
962 for (i = 0; i < nb_pkts; i++) {
963 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
964 /* Security offload not enabled. So an LPM lookup is
965 * required to get the hop
967 offset = offsetof(struct ip6_hdr, ip6_dst);
968 ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
970 memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
975 rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
980 for (i = 0; i < nb_pkts; i++) {
981 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
982 /* Read hop from the SA */
983 pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
985 /* Need to use hop returned by lookup */
986 pkt_hop = hop[lpm_pkts++];
990 rte_pktmbuf_free(pkts[i]);
993 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
998 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
999 uint8_t nb_pkts, uint16_t portid)
1001 struct ipsec_traffic traffic;
1003 prepare_traffic(pkts, &traffic, nb_pkts);
1005 if (unlikely(single_sa)) {
1006 if (UNPROTECTED_PORT(portid))
1007 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
1009 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
1011 if (UNPROTECTED_PORT(portid))
1012 process_pkts_inbound(&qconf->inbound, &traffic);
1014 process_pkts_outbound(&qconf->outbound, &traffic);
1017 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
1018 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
1022 drain_tx_buffers(struct lcore_conf *qconf)
1027 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1028 buf = &qconf->tx_mbufs[portid];
1031 send_burst(qconf, buf->len, portid);
1037 drain_crypto_buffers(struct lcore_conf *qconf)
1040 struct ipsec_ctx *ctx;
1042 /* drain inbound buffers*/
1043 ctx = &qconf->inbound;
1044 for (i = 0; i != ctx->nb_qps; i++) {
1045 if (ctx->tbl[i].len != 0)
1046 enqueue_cop_burst(ctx->tbl + i);
1049 /* drain outbound buffers*/
1050 ctx = &qconf->outbound;
1051 for (i = 0; i != ctx->nb_qps; i++) {
1052 if (ctx->tbl[i].len != 0)
1053 enqueue_cop_burst(ctx->tbl + i);
1058 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
1059 struct ipsec_ctx *ctx)
1062 struct ipsec_traffic trf;
1064 if (app_sa_prm.enable == 0) {
1066 /* dequeue packets from crypto-queue */
1067 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1068 RTE_DIM(trf.ipsec.pkts));
1073 /* split traffic by ipv4-ipv6 */
1074 split46_traffic(&trf, trf.ipsec.pkts, n);
1076 ipsec_cqp_process(ctx, &trf);
1078 /* process ipv4 packets */
1079 if (trf.ip4.num != 0) {
1080 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
1081 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1084 /* process ipv6 packets */
1085 if (trf.ip6.num != 0) {
1086 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
1087 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1092 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
1093 struct ipsec_ctx *ctx)
1096 struct ipsec_traffic trf;
1098 if (app_sa_prm.enable == 0) {
1100 /* dequeue packets from crypto-queue */
1101 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1102 RTE_DIM(trf.ipsec.pkts));
1107 /* split traffic by ipv4-ipv6 */
1108 split46_traffic(&trf, trf.ipsec.pkts, n);
1110 ipsec_cqp_process(ctx, &trf);
1112 /* process ipv4 packets */
1113 if (trf.ip4.num != 0)
1114 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1116 /* process ipv6 packets */
1117 if (trf.ip6.num != 0)
1118 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1121 /* main processing loop */
1123 main_loop(__attribute__((unused)) void *dummy)
1125 struct rte_mbuf *pkts[MAX_PKT_BURST];
1127 uint64_t prev_tsc, diff_tsc, cur_tsc;
1131 struct lcore_conf *qconf;
1132 int32_t rc, socket_id;
1133 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1134 / US_PER_S * BURST_TX_DRAIN_US;
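/*
 * For illustration, assuming a 2 GHz TSC (illustrative value):
 * drain_tsc = ceil(2e9 / US_PER_S) * BURST_TX_DRAIN_US = 2000 * 100
 * = 200000 cycles, i.e. buffers are drained roughly every 100 us.
 */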
1135 struct lcore_rx_queue *rxql;
1138 lcore_id = rte_lcore_id();
1139 qconf = &lcore_conf[lcore_id];
1140 rxql = qconf->rx_queue_list;
1141 socket_id = rte_lcore_to_socket_id(lcore_id);
1143 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
1144 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
1145 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
1146 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
1147 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
1148 qconf->inbound.cdev_map = cdev_map_in;
1149 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
1150 qconf->inbound.session_priv_pool =
1151 socket_ctx[socket_id].session_priv_pool;
1152 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
1153 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
1154 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
1155 qconf->outbound.cdev_map = cdev_map_out;
1156 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
1157 qconf->outbound.session_priv_pool =
1158 socket_ctx[socket_id].session_priv_pool;
1159 qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
1160 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1162 rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
1165 "SAD cache init on lcore %u failed with code: %d\n",
1170 if (qconf->nb_rx_queue == 0) {
1171 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1176 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1178 for (i = 0; i < qconf->nb_rx_queue; i++) {
1179 portid = rxql[i].port_id;
1180 queueid = rxql[i].queue_id;
1181 RTE_LOG(INFO, IPSEC,
1182 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1183 lcore_id, portid, queueid);
1187 cur_tsc = rte_rdtsc();
1189 /* TX queue buffer drain */
1190 diff_tsc = cur_tsc - prev_tsc;
1192 if (unlikely(diff_tsc > drain_tsc)) {
1193 drain_tx_buffers(qconf);
1194 drain_crypto_buffers(qconf);
1198 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1200 /* Read packets from RX queues */
1201 portid = rxql[i].port_id;
1202 queueid = rxql[i].queue_id;
1203 nb_rx = rte_eth_rx_burst(portid, queueid,
1204 pkts, MAX_PKT_BURST);
1207 process_pkts(qconf, pkts, nb_rx, portid);
1209 /* dequeue and process completed crypto-ops */
1210 if (UNPROTECTED_PORT(portid))
1211 drain_inbound_crypto_queues(qconf,
1214 drain_outbound_crypto_queues(qconf,
1221 check_poll_mode_params(struct eh_conf *eh_conf)
1231 if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
1234 if (lcore_params == NULL) {
1235 printf("Error: No port/queue/core mappings\n");
1239 for (i = 0; i < nb_lcore_params; ++i) {
1240 lcore = lcore_params[i].lcore_id;
1241 if (!rte_lcore_is_enabled(lcore)) {
1242 printf("error: lcore %hhu is not enabled in "
1243 "lcore mask\n", lcore);
1246 socket_id = rte_lcore_to_socket_id(lcore);
1247 if (socket_id != 0 && numa_on == 0) {
1248 printf("warning: lcore %hhu is on socket %d "
1252 portid = lcore_params[i].port_id;
1253 if ((enabled_port_mask & (1 << portid)) == 0) {
1254 printf("port %u is not enabled in port mask\n", portid);
1257 if (!rte_eth_dev_is_valid_port(portid)) {
1258 printf("port %u is not present on the board\n", portid);
1266 get_port_nb_rx_queues(const uint16_t port)
1271 for (i = 0; i < nb_lcore_params; ++i) {
1272 if (lcore_params[i].port_id == port &&
1273 lcore_params[i].queue_id > queue)
1274 queue = lcore_params[i].queue_id;
1276 return (uint8_t)(++queue);
1280 init_lcore_rx_queues(void)
1282 uint16_t i, nb_rx_queue;
1285 for (i = 0; i < nb_lcore_params; ++i) {
1286 lcore = lcore_params[i].lcore_id;
1287 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1288 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1289 printf("error: too many queues (%u) for lcore: %u\n",
1290 nb_rx_queue + 1, lcore);
1293 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1294 lcore_params[i].port_id;
1295 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1296 lcore_params[i].queue_id;
1297 lcore_conf[lcore].nb_rx_queue++;
1304 print_usage(const char *prgname)
1306 fprintf(stderr, "%s [EAL options] --"
1312 " [-w REPLAY_WINDOW_SIZE]"
1317 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1318 " [--single-sa SAIDX]"
1319 " [--cryptodev_mask MASK]"
1320 " [--transfer-mode MODE]"
1321 " [--event-schedule-type TYPE]"
1322 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1323 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1324 " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
1325 " [--" CMD_LINE_OPT_MTU " MTU]"
1327 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1328 " -P : Enable promiscuous mode\n"
1329 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1330 " -j FRAMESIZE: Data buffer size, minimum (and default)\n"
1331 " value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
1332 " -l enables code-path that uses librte_ipsec\n"
1333 " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
1334 " size for each SA\n"
1336 " -a enables SA SQN atomic behaviour\n"
1337 " -c specifies inbound SAD cache size,\n"
1338 " zero value disables the cache (default value: 128)\n"
1339 " -f CONFIG_FILE: Configuration file\n"
1340 " --config (port,queue,lcore): Rx queue configuration. In poll\n"
1341 " mode determines which queues from\n"
1342 " which ports are mapped to which cores.\n"
1343 " In event mode this option is not used\n"
1344 " as packets are dynamically scheduled\n"
1345 " to cores by HW.\n"
1346 " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1347 " bypassing the SP\n"
1348 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1349 " devices to configure\n"
1350 " --transfer-mode MODE\n"
1351 " \"poll\" : Packet transfer via polling (default)\n"
1352 " \"event\" : Packet transfer via event device\n"
1353 " --event-schedule-type TYPE queue schedule type, used only when\n"
1354 " transfer mode is set to event\n"
1355 " \"ordered\" : Ordered (default)\n"
1356 " \"atomic\" : Atomic\n"
1357 " \"parallel\" : Parallel\n"
1358 " --" CMD_LINE_OPT_RX_OFFLOAD
1359 ": bitmask of the RX HW offload capabilities to enable/use\n"
1360 " (DEV_RX_OFFLOAD_*)\n"
1361 " --" CMD_LINE_OPT_TX_OFFLOAD
1362 ": bitmask of the TX HW offload capabilities to enable/use\n"
1363 " (DEV_TX_OFFLOAD_*)\n"
1364 " --" CMD_LINE_OPT_REASSEMBLE " NUM"
1365 ": max number of entries in reassemble(fragment) table\n"
1366 " (zero (default value) disables reassembly)\n"
1367 " --" CMD_LINE_OPT_MTU " MTU"
1368 ": MTU value on all ports (default value: 1500)\n"
1369 " outgoing packets with bigger size will be fragmented\n"
1370 " incoming packets with bigger size will be discarded\n"
1371 " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
1372 ": fragments lifetime in nanoseconds, default\n"
1373 " and maximum value is 10.000.000.000 ns (10 s)\n"
1379 parse_mask(const char *str, uint64_t *val)
1385 t = strtoul(str, &end, 0);
1386 if (errno != 0 || end[0] != 0)
1394 parse_portmask(const char *portmask)
1399 /* parse hexadecimal string */
1400 pm = strtoul(portmask, &end, 16);
1401 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1404 if ((pm == 0) && errno)
1411 parse_decimal(const char *str)
1416 num = strtoull(str, &end, 10);
1417 if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
1425 parse_config(const char *q_arg)
1428 const char *p, *p0 = q_arg;
1436 unsigned long int_fld[_NUM_FLD];
1437 char *str_fld[_NUM_FLD];
1441 nb_lcore_params = 0;
1443 while ((p = strchr(p0, '(')) != NULL) {
1445 p0 = strchr(p, ')');
1450 if (size >= sizeof(s))
1453 snprintf(s, sizeof(s), "%.*s", size, p);
1454 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1457 for (i = 0; i < _NUM_FLD; i++) {
1459 int_fld[i] = strtoul(str_fld[i], &end, 0);
1460 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1463 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1464 printf("exceeded max number of lcore params: %hu\n",
1468 lcore_params_array[nb_lcore_params].port_id =
1469 (uint8_t)int_fld[FLD_PORT];
1470 lcore_params_array[nb_lcore_params].queue_id =
1471 (uint8_t)int_fld[FLD_QUEUE];
1472 lcore_params_array[nb_lcore_params].lcore_id =
1473 (uint8_t)int_fld[FLD_LCORE];
1476 lcore_params = lcore_params_array;
1481 print_app_sa_prm(const struct app_sa_prm *prm)
1483 printf("librte_ipsec usage: %s\n",
1484 (prm->enable == 0) ? "disabled" : "enabled");
1486 printf("replay window size: %u\n", prm->window_size);
1487 printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1488 printf("SA flags: %#" PRIx64 "\n", prm->flags);
1489 printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
1493 parse_transfer_mode(struct eh_conf *conf, const char *optarg)
1495 if (!strcmp(CMD_LINE_ARG_POLL, optarg))
1496 conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1497 else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
1498 conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
1500 printf("Unsupported packet transfer mode\n");
1508 parse_schedule_type(struct eh_conf *conf, const char *optarg)
1510 struct eventmode_conf *em_conf = NULL;
1512 /* Get eventmode conf */
1513 em_conf = conf->mode_params;
1515 if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
1516 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
1517 else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
1518 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
1519 else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
1520 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
1522 printf("Unsupported queue schedule type\n");
1530 parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
1535 int32_t option_index;
1536 char *prgname = argv[0];
1537 int32_t f_present = 0;
1541 while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:",
1542 lgopts, &option_index)) != EOF) {
1546 enabled_port_mask = parse_portmask(optarg);
1547 if (enabled_port_mask == 0) {
1548 printf("invalid portmask\n");
1549 print_usage(prgname);
1554 printf("Promiscuous mode selected\n");
1558 unprotected_port_mask = parse_portmask(optarg);
1559 if (unprotected_port_mask == 0) {
1560 printf("invalid unprotected portmask\n");
1561 print_usage(prgname);
1566 if (f_present == 1) {
1567 printf("\"-f\" option present more than "
1569 print_usage(prgname);
1576 ret = parse_decimal(optarg);
1577 if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1579 printf("Invalid frame buffer size value: %s\n",
1581 print_usage(prgname);
1584 frame_buf_size = ret;
1585 printf("Custom frame buffer size %u\n", frame_buf_size);
1588 app_sa_prm.enable = 1;
1591 app_sa_prm.window_size = parse_decimal(optarg);
1594 app_sa_prm.enable_esn = 1;
1597 app_sa_prm.enable = 1;
1598 app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1601 ret = parse_decimal(optarg);
1603 printf("Invalid SA cache size: %s\n", optarg);
1604 print_usage(prgname);
1607 app_sa_prm.cache_sz = ret;
1609 case CMD_LINE_OPT_CONFIG_NUM:
1610 ret = parse_config(optarg);
1612 printf("Invalid config\n");
1613 print_usage(prgname);
1617 case CMD_LINE_OPT_SINGLE_SA_NUM:
1618 ret = parse_decimal(optarg);
1619 if (ret == -1 || ret > UINT32_MAX) {
1620 printf("Invalid argument[sa_idx]\n");
1621 print_usage(prgname);
1627 single_sa_idx = ret;
1628 eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1629 printf("Configured with single SA index %u\n",
1632 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1633 ret = parse_portmask(optarg);
1635 printf("Invalid argument[portmask]\n");
1636 print_usage(prgname);
1641 enabled_cryptodev_mask = ret;
1644 case CMD_LINE_OPT_TRANSFER_MODE_NUM:
1645 ret = parse_transfer_mode(eh_conf, optarg);
1647 printf("Invalid packet transfer mode\n");
1648 print_usage(prgname);
1653 case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
1654 ret = parse_schedule_type(eh_conf, optarg);
1656 printf("Invalid queue schedule type\n");
1657 print_usage(prgname);
1662 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1663 ret = parse_mask(optarg, &dev_rx_offload);
1665 printf("Invalid argument for \'%s\': %s\n",
1666 CMD_LINE_OPT_RX_OFFLOAD, optarg);
1667 print_usage(prgname);
1671 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1672 ret = parse_mask(optarg, &dev_tx_offload);
1674 printf("Invalid argument for \'%s\': %s\n",
1675 CMD_LINE_OPT_TX_OFFLOAD, optarg);
1676 print_usage(prgname);
1680 case CMD_LINE_OPT_REASSEMBLE_NUM:
1681 ret = parse_decimal(optarg);
1682 if (ret < 0 || ret > UINT32_MAX) {
1683 printf("Invalid argument for \'%s\': %s\n",
1684 CMD_LINE_OPT_REASSEMBLE, optarg);
1685 print_usage(prgname);
1690 case CMD_LINE_OPT_MTU_NUM:
1691 ret = parse_decimal(optarg);
1692 if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1693 printf("Invalid argument for \'%s\': %s\n",
1694 CMD_LINE_OPT_MTU, optarg);
1695 print_usage(prgname);
1700 case CMD_LINE_OPT_FRAG_TTL_NUM:
1701 ret = parse_decimal(optarg);
1702 if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1703 printf("Invalid argument for \'%s\': %s\n",
1704 CMD_LINE_OPT_FRAG_TTL, optarg);
1705 print_usage(prgname);
1711 print_usage(prgname);
1716 if (f_present == 0) {
1717 printf("Mandatory option \"-f\" not present\n");
1721 /* check whether we need to enable multi-seg support */
1722 if (multi_seg_required()) {
1723 /* legacy mode doesn't support multi-seg */
1724 app_sa_prm.enable = 1;
1725 printf("frame buf size: %u, mtu: %u, "
1726 "number of reassemble entries: %u\n"
1727 "multi-segment support is required\n",
1728 frame_buf_size, mtu_size, frag_tbl_sz);
1731 print_app_sa_prm(&app_sa_prm);
1734 argv[optind-1] = prgname;
1737 optind = 1; /* reset getopt lib */
1742 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1744 char buf[RTE_ETHER_ADDR_FMT_SIZE];
1745 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1746 printf("%s%s", name, buf);
1750 * Update destination ethaddr for the port.
1753 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1755 if (port >= RTE_DIM(ethaddr_tbl))
1758 ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1762 /* Check link status of all ports for up to 9 s, then print the final status */
1764 check_all_ports_link_status(uint32_t port_mask)
1766 #define CHECK_INTERVAL 100 /* 100ms */
1767 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1769 uint8_t count, all_ports_up, print_flag = 0;
1770 struct rte_eth_link link;
1773 printf("\nChecking link status");
1775 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1777 RTE_ETH_FOREACH_DEV(portid) {
1778 if ((port_mask & (1 << portid)) == 0)
1780 memset(&link, 0, sizeof(link));
1781 ret = rte_eth_link_get_nowait(portid, &link);
1784 if (print_flag == 1)
1785 printf("Port %u link get failed: %s\n",
1786 portid, rte_strerror(-ret));
1789 /* print link status if flag set */
1790 if (print_flag == 1) {
1791 if (link.link_status)
1793 "Port%d Link Up - speed %u Mbps -%s\n",
1794 portid, link.link_speed,
1795 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1796 ("full-duplex") : ("half-duplex"));
1798 printf("Port %d Link Down\n", portid);
1801 /* clear all_ports_up flag if any link down */
1802 if (link.link_status == ETH_LINK_DOWN) {
1807 /* after finally printing all link status, get out */
1808 if (print_flag == 1)
1811 if (all_ports_up == 0) {
1814 rte_delay_ms(CHECK_INTERVAL);
1817 /* set the print_flag if all ports up or timeout */
1818 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1826 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1827 uint16_t qp, struct lcore_params *params,
1828 struct ipsec_ctx *ipsec_ctx,
1829 const struct rte_cryptodev_capabilities *cipher,
1830 const struct rte_cryptodev_capabilities *auth,
1831 const struct rte_cryptodev_capabilities *aead)
1835 struct cdev_key key = { 0 };
1837 key.lcore_id = params->lcore_id;
1839 key.cipher_algo = cipher->sym.cipher.algo;
1841 key.auth_algo = auth->sym.auth.algo;
1843 key.aead_algo = aead->sym.aead.algo;
1845 ret = rte_hash_lookup(map, &key);
1849 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1850 if (ipsec_ctx->tbl[i].id == cdev_id)
1853 if (i == ipsec_ctx->nb_qps) {
1854 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1855 printf("Maximum number of crypto devices assigned to "
1856 "a core, increase MAX_QP_PER_LCORE value\n");
1859 ipsec_ctx->tbl[i].id = cdev_id;
1860 ipsec_ctx->tbl[i].qp = qp;
1861 ipsec_ctx->nb_qps++;
1862 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1863 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1867 ret = rte_hash_add_key_data(map, &key, (void *)i);
1869 printf("Failed to insert cdev mapping for (lcore %u, "
1870 "cdev %u, qp %u), errno %d\n",
1871 key.lcore_id, ipsec_ctx->tbl[i].id,
1872 ipsec_ctx->tbl[i].qp, ret);
1880 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1881 uint16_t qp, struct lcore_params *params)
1884 const struct rte_cryptodev_capabilities *i, *j;
1885 struct rte_hash *map;
1886 struct lcore_conf *qconf;
1887 struct ipsec_ctx *ipsec_ctx;
1890 qconf = &lcore_conf[params->lcore_id];
1892 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1894 ipsec_ctx = &qconf->outbound;
1898 ipsec_ctx = &qconf->inbound;
1902 /* Only use cryptodevs that support symmetric operation chaining */
1903 if (!(dev_info->feature_flags &
1904 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1907 for (i = dev_info->capabilities;
1908 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1909 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1912 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1913 ret |= add_mapping(map, str, cdev_id, qp, params,
1914 ipsec_ctx, NULL, NULL, i);
1918 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1921 for (j = dev_info->capabilities;
1922 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1923 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1926 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1929 ret |= add_mapping(map, str, cdev_id, qp, params,
1930 ipsec_ctx, i, j, NULL);
1937 /* Check if the device is enabled by cryptodev_mask */
1939 check_cryptodev_mask(uint8_t cdev_id)
1941 if (enabled_cryptodev_mask & (1 << cdev_id))
1948 cryptodevs_init(void)
1950 struct rte_cryptodev_config dev_conf;
1951 struct rte_cryptodev_qp_conf qp_conf;
1952 uint16_t idx, max_nb_qps, qp, i;
1954 struct rte_hash_parameters params = { 0 };
1956 const uint64_t mseg_flag = multi_seg_required() ?
1957 RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1959 params.entries = CDEV_MAP_ENTRIES;
1960 params.key_len = sizeof(struct cdev_key);
1961 params.hash_func = rte_jhash;
1962 params.hash_func_init_val = 0;
1963 params.socket_id = rte_socket_id();
1965 params.name = "cdev_map_in";
1966 cdev_map_in = rte_hash_create(&params);
1967 if (cdev_map_in == NULL)
1968 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1971 params.name = "cdev_map_out";
1972 cdev_map_out = rte_hash_create(&params);
1973 if (cdev_map_out == NULL)
1974 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1977 printf("lcore/cryptodev/qp mappings:\n");
1980 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1981 struct rte_cryptodev_info cdev_info;
1983 if (check_cryptodev_mask((uint8_t)cdev_id))
1986 rte_cryptodev_info_get(cdev_id, &cdev_info);
1988 if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1989 rte_exit(EXIT_FAILURE,
1990 "Device %hd does not support \'%s\' feature\n",
1992 rte_cryptodev_get_feature_name(mseg_flag));
1994 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1995 max_nb_qps = cdev_info.max_nb_queue_pairs;
1997 max_nb_qps = nb_lcore_params;
2001 while (qp < max_nb_qps && i < nb_lcore_params) {
2002 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
2003 &lcore_params[idx]))
2006 idx = idx % nb_lcore_params;
2013 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
2014 dev_conf.nb_queue_pairs = qp;
2015 dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
2017 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
2018 if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
2019 rte_exit(EXIT_FAILURE,
2020 "Device supports fewer than %u "
2021 "sessions", CDEV_MP_NB_OBJS);
2023 if (rte_cryptodev_configure(cdev_id, &dev_conf))
2024 rte_panic("Failed to initialize cryptodev %u\n",
2027 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
2028 qp_conf.mp_session =
2029 socket_ctx[dev_conf.socket_id].session_pool;
2030 qp_conf.mp_session_private =
2031 socket_ctx[dev_conf.socket_id].session_priv_pool;
2032 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
2033 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
2034 &qp_conf, dev_conf.socket_id))
2035 rte_panic("Failed to setup queue %u for "
2036 "cdev_id %u\n", qp, cdev_id);
2038 if (rte_cryptodev_start(cdev_id))
2039 rte_panic("Failed to start cryptodev %u\n",
2049 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
2051 uint32_t frame_size;
2052 struct rte_eth_dev_info dev_info;
2053 struct rte_eth_txconf *txconf;
2054 uint16_t nb_tx_queue, nb_rx_queue;
2055 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
2056 int32_t ret, socket_id;
2057 struct lcore_conf *qconf;
2058 struct rte_ether_addr ethaddr;
2059 struct rte_eth_conf local_port_conf = port_conf;
2061 ret = rte_eth_dev_info_get(portid, &dev_info);
2063 rte_exit(EXIT_FAILURE,
2064 "Error during getting device (port %u) info: %s\n",
2065 portid, strerror(-ret));
2067 /* limit allowed HW offloads, as requested by the user */
2068 dev_info.rx_offload_capa &= dev_rx_offload;
2069 dev_info.tx_offload_capa &= dev_tx_offload;
2071 printf("Configuring device port %u:\n", portid);
2073 ret = rte_eth_macaddr_get(portid, &ethaddr);
2075 rte_exit(EXIT_FAILURE,
2076 "Error getting MAC address (port %u): %s\n",
2077 portid, rte_strerror(-ret));
2079 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr);
2080 print_ethaddr("Address: ", &ethaddr);
2083 nb_rx_queue = get_port_nb_rx_queues(portid);
2084 nb_tx_queue = nb_lcores;
2086 if (nb_rx_queue > dev_info.max_rx_queues)
2087 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
2088 "(max rx queue is %u)\n",
2089 nb_rx_queue, dev_info.max_rx_queues);
2091 if (nb_tx_queue > dev_info.max_tx_queues)
2092 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
2093 "(max tx queue is %u)\n",
2094 nb_tx_queue, dev_info.max_tx_queues);
2096 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
2097 nb_rx_queue, nb_tx_queue);
2099 frame_size = MTU_TO_FRAMELEN(mtu_size);
2100 if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
2101 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2102 local_port_conf.rxmode.max_rx_pkt_len = frame_size;
2104 if (multi_seg_required()) {
2105 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
2106 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
2109 local_port_conf.rxmode.offloads |= req_rx_offloads;
2110 local_port_conf.txmode.offloads |= req_tx_offloads;
2112 /* Check that all required capabilities are supported */
2113 if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
2114 local_port_conf.rxmode.offloads)
2115 rte_exit(EXIT_FAILURE,
2116 "Error: port %u required RX offloads: 0x%" PRIx64
2117 ", available RX offloads: 0x%" PRIx64 "\n",
2118 portid, local_port_conf.rxmode.offloads,
2119 dev_info.rx_offload_capa);
2121 if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
2122 local_port_conf.txmode.offloads)
2123 rte_exit(EXIT_FAILURE,
2124 "Error: port %u required TX offloads: 0x%" PRIx64
2125 ", available TX offloads: 0x%" PRIx64 "\n",
2126 portid, local_port_conf.txmode.offloads,
2127 dev_info.tx_offload_capa);
2129 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
2130 local_port_conf.txmode.offloads |=
2131 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2133 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
2134 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
2136 printf("port %u configuring rx_offloads=0x%" PRIx64
2137 ", tx_offloads=0x%" PRIx64 "\n",
2138 portid, local_port_conf.rxmode.offloads,
2139 local_port_conf.txmode.offloads);
2141 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2142 dev_info.flow_type_rss_offloads;
2143 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2144 port_conf.rx_adv_conf.rss_conf.rss_hf) {
2145 printf("Port %u modified RSS hash function based on hardware support, "
2146 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
2148 port_conf.rx_adv_conf.rss_conf.rss_hf,
2149 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2152 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2155 rte_exit(EXIT_FAILURE, "Cannot configure device: "
2156 "err=%d, port=%d\n", ret, portid);
2158 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2160 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2161 "err=%d, port=%d\n", ret, portid);
2163 /* init one TX queue per lcore */
2165 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2166 if (rte_lcore_is_enabled(lcore_id) == 0)
2170 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2175 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2177 txconf = &dev_info.default_txconf;
2178 txconf->offloads = local_port_conf.txmode.offloads;
2180 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2183 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2184 "err=%d, port=%d\n", ret, portid);
2186 qconf = &lcore_conf[lcore_id];
2187 qconf->tx_queue_id[portid] = tx_queueid;
2189 /* Pre-populate pkt offloads based on capabilities */
2190 qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
2191 qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
2192 if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
2193 qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
2197 /* init RX queues */
2198 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2199 struct rte_eth_rxconf rxq_conf;
2201 if (portid != qconf->rx_queue_list[queue].port_id)
2204 rx_queueid = qconf->rx_queue_list[queue].queue_id;
2206 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2209 rxq_conf = dev_info.default_rxconf;
2210 rxq_conf.offloads = local_port_conf.rxmode.offloads;
2211 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2212 nb_rxd, socket_id, &rxq_conf,
2213 socket_ctx[socket_id].mbuf_pool);
2215 rte_exit(EXIT_FAILURE,
2216 "rte_eth_rx_queue_setup: err=%d, "
2217 "port=%d\n", ret, portid);
2224 max_session_size(void)
2228 int16_t cdev_id, port_id, n;
2231 n = rte_cryptodev_count();
2232 for (cdev_id = 0; cdev_id != n; cdev_id++) {
2233 sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2237 * If crypto device is security capable, need to check the
2238 * size of security session as well.
2241 /* Get security context of the crypto device */
2242 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2243 if (sec_ctx == NULL)
2246 /* Get size of security session */
2247 sz = rte_security_session_get_size(sec_ctx);
2252 RTE_ETH_FOREACH_DEV(port_id) {
2253 if ((enabled_port_mask & (1 << port_id)) == 0)
2256 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2257 if (sec_ctx == NULL)
2260 sz = rte_security_session_get_size(sec_ctx);
2269 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2271 char mp_name[RTE_MEMPOOL_NAMESIZE];
2272 struct rte_mempool *sess_mp;
2274 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2275 "sess_mp_%u", socket_id);
2276 sess_mp = rte_cryptodev_sym_session_pool_create(
2277 mp_name, CDEV_MP_NB_OBJS,
2278 sess_sz, CDEV_MP_CACHE_SZ, 0,
2280 ctx->session_pool = sess_mp;
2282 if (ctx->session_pool == NULL)
2283 rte_exit(EXIT_FAILURE,
2284 "Cannot init session pool on socket %d\n", socket_id);
2286 printf("Allocated session pool on socket %d\n", socket_id);
2290 session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
2293 char mp_name[RTE_MEMPOOL_NAMESIZE];
2294 struct rte_mempool *sess_mp;
2296 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2297 "sess_mp_priv_%u", socket_id);
2298 sess_mp = rte_mempool_create(mp_name,
2302 0, NULL, NULL, NULL,
2305 ctx->session_priv_pool = sess_mp;
2307 if (ctx->session_priv_pool == NULL)
2308 rte_exit(EXIT_FAILURE,
2309 "Cannot init session priv pool on socket %d\n",
2312 printf("Allocated session priv pool on socket %d\n",
2317 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
2322 snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
2323 ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
2324 MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
2325 frame_buf_size, socket_id);
2328 * if multi-segment support is enabled, then create a pool
2329 * for indirect mbufs.
2331 ms = multi_seg_required();
2333 snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2334 ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2335 MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2338 if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
2339 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2342 printf("Allocated mbuf pool on socket %d\n", socket_id);
2346 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2348 struct ipsec_sa *sa;
2350 /* For inline protocol processing, the metadata in the event will
2351 * uniquely identify the security session which raised the event.
2352 * Application would then need the userdata it had registered with the
2353 * security session to process the event.
2356 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2359 /* userdata could not be retrieved */
2363 /* Sequence number overflow. SA needs to be re-established */
2369 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2370 void *param, void *ret_param)
2373 struct rte_eth_event_ipsec_desc *event_desc = NULL;
2374 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2375 rte_eth_dev_get_sec_ctx(port_id);
2377 RTE_SET_USED(param);
2379 if (type != RTE_ETH_EVENT_IPSEC)
2382 event_desc = ret_param;
2383 if (event_desc == NULL) {
2384 printf("Event descriptor not set\n");
2388 md = event_desc->metadata;
2390 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2391 return inline_ipsec_event_esn_overflow(ctx, md);
2392 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2393 printf("Invalid IPsec event reported\n");
2401 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2402 struct rte_mbuf *pkt[], uint16_t nb_pkts,
2403 __rte_unused uint16_t max_pkts, void *user_param)
2407 struct lcore_conf *lc;
2408 struct rte_mbuf *mb;
2409 struct rte_ether_hdr *eth;
2415 for (i = 0; i != nb_pkts; i++) {
2418 eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2419 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2421 struct rte_ipv4_hdr *iph;
2423 iph = (struct rte_ipv4_hdr *)(eth + 1);
2424 if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2426 mb->l2_len = sizeof(*eth);
2427 mb->l3_len = sizeof(*iph);
2428 tm = (tm != 0) ? tm : rte_rdtsc();
2429 mb = rte_ipv4_frag_reassemble_packet(
2430 lc->frag.tbl, &lc->frag.dr,
2434 /* fix ip cksum after reassemble. */
2435 iph = rte_pktmbuf_mtod_offset(mb,
2436 struct rte_ipv4_hdr *,
2438 iph->hdr_checksum = 0;
2439 iph->hdr_checksum = rte_ipv4_cksum(iph);
2442 } else if (eth->ether_type ==
2443 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2445 struct rte_ipv6_hdr *iph;
2446 struct ipv6_extension_fragment *fh;
2448 iph = (struct rte_ipv6_hdr *)(eth + 1);
2449 fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2451 mb->l2_len = sizeof(*eth);
2452 mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2454 tm = (tm != 0) ? tm : rte_rdtsc();
2455 mb = rte_ipv6_frag_reassemble_packet(
2456 lc->frag.tbl, &lc->frag.dr,
2459 /* fix l3_len after reassemble. */
2460 mb->l3_len = mb->l3_len - sizeof(*fh);
2468 /* some fragments were encountered, drain death row */
2470 rte_ip_frag_free_death_row(&lc->frag.dr, 0);
2477 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2481 uint64_t frag_cycles;
2482 const struct lcore_rx_queue *rxq;
2483 const struct rte_eth_rxtx_callback *cb;
2485 /* create fragment table */
2486 sid = rte_lcore_to_socket_id(cid);
2487 frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
2488 NS_PER_S * frag_ttl_ns;
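/*
 * For illustration, assuming a 2 GHz TSC (illustrative value) and the default
 * 10 s TTL: frag_cycles = ceil(2e9 / NS_PER_S) * frag_ttl_ns = 2 * 1e10
 * = 2e10 cycles.
 */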
2490 lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2491 FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2492 if (lc->frag.tbl == NULL) {
2493 printf("%s(%u): failed to create fragment table of size: %u, "
2495 __func__, cid, frag_tbl_sz, rte_errno);
2499 /* setup reassemble RX callbacks for all queues */
2500 for (i = 0; i != lc->nb_rx_queue; i++) {
2502 rxq = lc->rx_queue_list + i;
2503 cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2506 printf("%s(%u): failed to install RX callback for "
2507 "portid=%u, queueid=%u, error code: %d\n",
2509 rxq->port_id, rxq->queue_id, rte_errno);
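/*
 * Worked example for the TTL conversion above (numbers are illustrative):
 * with a 2.0 GHz TSC, ceil(rte_get_tsc_hz() / NS_PER_S) = 2 cycles per
 * nanosecond, so a frag_ttl_ns of MAX_FRAG_TTL_NS (10 seconds, i.e. 1e10 ns)
 * maps to frag_cycles = 2 * 1e10 = 2e10 TSC cycles, after which incomplete
 * fragments in the table are treated as stale.
 */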
static int
reassemble_init(void)
{
	int32_t rc = 0;
	uint32_t i, lc;

	for (i = 0; i != nb_lcore_params; i++) {
		lc = lcore_params[i].lcore_id;
		rc = reassemble_lcore_init(lcore_conf + lc, lc);
		if (rc != 0)
			break;
	}
	return rc;
}
static int
create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
{
	struct rte_flow_action action[2];
	struct rte_flow_item pattern[2];
	struct rte_flow_attr attr = {0};
	struct rte_flow_error err;
	struct rte_flow *flow;
	int ret;

	if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
		return -1;

	/* Add the default rte_flow to enable SECURITY for all ESP packets */
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
	pattern[0].spec = NULL;
	pattern[0].mask = NULL;
	pattern[0].last = NULL;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
	action[0].conf = NULL;
	action[1].type = RTE_FLOW_ACTION_TYPE_END;
	action[1].conf = NULL;

	attr.ingress = 1;

	ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
	if (ret)
		return ret;

	flow = rte_flow_create(port_id, &attr, pattern, action, &err);
	if (flow == NULL)
		return -1;

	flow_info_tbl[port_id].rx_def_flow = flow;
	RTE_LOG(INFO, IPSEC,
		"Created default flow enabling SECURITY for all ESP traffic on port %d\n",
		port_id);

	return 0;
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
			signum);
		force_quit = true;
	}
}
static void
ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
{
	struct rte_ipsec_session *ips;
	int i;

	if (!sa || !nb_sa)
		return;
	for (i = 0; i < nb_sa; i++) {
		ips = ipsec_get_primary_session(&sa[i]);
		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
			rte_exit(EXIT_FAILURE, "Event mode supports only "
				"inline protocol sessions\n");
	}
}
static int32_t
check_event_mode_params(struct eh_conf *eh_conf)
{
	struct eventmode_conf *em_conf = NULL;
	struct lcore_params *params;
	uint16_t portid;

	if (!eh_conf || !eh_conf->mode_params)
		return -EINVAL;

	/* Get eventmode conf */
	em_conf = eh_conf->mode_params;

	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
	    em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
		printf("error: option --event-schedule-type applies only to "
			"event mode\n");
		return -EINVAL;
	}

	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	/* Set schedule type to ORDERED if it wasn't explicitly set by user */
	if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;

	/*
	 * Event mode currently supports only inline protocol sessions.
	 * If there are other types of sessions configured then exit with
	 * error.
	 */
	ev_mode_sess_verify(sa_in, nb_sa_in);
	ev_mode_sess_verify(sa_out, nb_sa_out);

	/* Option --config does not apply to event mode */
	if (nb_lcore_params > 0) {
		printf("error: option --config applies only to poll mode\n");
		return -EINVAL;
	}

	/*
	 * In order to use the same port_init routine for both poll and event
	 * modes, initialize lcore_params with one queue for each eth port.
	 */
	lcore_params = lcore_params_array;
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		params = &lcore_params[nb_lcore_params++];
		params->port_id = portid;
		params->queue_id = 0;
		params->lcore_id = rte_get_next_lcore(0, 0, 1);
	}

	return 0;
}
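/*
 * Usage sketch (option values are illustrative, not taken from this file):
 * event mode is selected and tuned on the command line, e.g.
 *
 *   ./ipsec-secgw <EAL args> -- -P -u 0x1 -f ep0.cfg \
 *       --transfer-mode event --event-schedule-type parallel
 *
 * If --event-schedule-type is omitted, the code above falls back to
 * RTE_SCHED_TYPE_ORDERED.
 */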
static void
inline_sessions_free(struct sa_ctx *sa_ctx)
{
	struct rte_ipsec_session *ips;
	struct ipsec_sa *sa;
	int32_t ret;
	uint32_t i;

	if (!sa_ctx)
		return;

	for (i = 0; i < sa_ctx->nb_sa; i++) {
		sa = &sa_ctx->sa[i];
		if (!sa->spi)
			continue;
		ips = ipsec_get_primary_session(sa);
		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
		    ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
			continue;
		if (!rte_eth_dev_is_valid_port(sa->portid))
			continue;
		ret = rte_security_session_destroy(
			rte_eth_dev_get_sec_ctx(sa->portid),
			ips->security.ses);
		if (ret)
			RTE_LOG(ERR, IPSEC, "Failed to destroy security "
				"session type %d, spi %d\n",
				ips->type, sa->spi);
	}
}
int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id, cdev_id, i;
	uint8_t socket_id;
	uint16_t portid;
	uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
	uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
	struct eh_conf *eh_conf = NULL;
	size_t sess_sz;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* initialize event helper configuration */
	eh_conf = eh_conf_init();
	if (eh_conf == NULL)
		rte_exit(EXIT_FAILURE, "Failed to init event helper config");
	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv, eh_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	/* parse configuration file */
	if (parse_cfg_file(cfgfile) < 0) {
		printf("parsing file \"%s\" failed\n",
			cfgfile);
		print_usage(argv[0]);
		rte_exit(EXIT_FAILURE, "Failed to parse config file\n");
	}

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
			unprotected_port_mask);
	if (check_poll_mode_params(eh_conf) < 0)
		rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");

	if (check_event_mode_params(eh_conf) < 0)
		rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	sess_sz = max_session_size();
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);

		/* mbuf_pool is initialised by the pool_init() function */
		if (socket_ctx[socket_id].mbuf_pool)
			continue;

		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
			sess_sz);
	}
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		sa_check_offloads(portid, &req_rx_offloads[portid],
			&req_tx_offloads[portid]);
		port_init(portid, req_rx_offloads[portid],
			req_tx_offloads[portid]);
	}

	/*
	 * Set the enabled port mask in helper config for use by helper
	 * sub-system. This will be used while initializing devices using
	 * helper sub-system.
	 */
	eh_conf->eth_portmask = enabled_port_mask;
	/* Initialize eventmode components */
	ret = eh_devs_init(eh_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Create flow before starting the device */
		create_default_ipsec_flow(portid, req_rx_offloads[portid]);

		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
				"err=%d, port=%d\n", ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on) {
			ret = rte_eth_promiscuous_enable(portid);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_promiscuous_enable: err=%s, port=%d\n",
					rte_strerror(-ret), portid);
		}

		rte_eth_dev_callback_register(portid,
			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
	}
	/* fragment reassembly is enabled */
	if (frag_tbl_sz != 0) {
		ret = reassemble_init();
		if (ret != 0)
			rte_exit(EXIT_FAILURE, "failed at reassemble init\n");
	}
	/* Replicate each context per socket */
	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
		socket_id = rte_socket_id_by_idx(i);
		if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
			(socket_ctx[socket_id].sa_in == NULL) &&
			(socket_ctx[socket_id].sa_out == NULL)) {
			sa_init(&socket_ctx[socket_id], socket_id);
			sp4_init(&socket_ctx[socket_id], socket_id);
			sp6_init(&socket_ctx[socket_id], socket_id);
			rt_init(&socket_ctx[socket_id], socket_id);
		}
	}
	check_all_ports_link_status(enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}
	/* Uninitialize eventmode components */
	ret = eh_devs_uninit(eh_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);

	/* Free eventmode configuration memory */
	eh_conf_uninit(eh_conf);

	/* Destroy inline inbound and outbound sessions */
	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
		socket_id = rte_socket_id_by_idx(i);
		inline_sessions_free(socket_ctx[socket_id].sa_in);
		inline_sessions_free(socket_ctx[socket_id].sa_out);
	}
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		printf("Closing cryptodev %d...", cdev_id);
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
		printf(" Done\n");
	}
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		printf("Closing port %d...", portid);
		if (flow_info_tbl[portid].rx_def_flow) {
			struct rte_flow_error err;

			ret = rte_flow_destroy(portid,
				flow_info_tbl[portid].rx_def_flow, &err);
			if (ret)
				RTE_LOG(ERR, IPSEC, "Failed to destroy flow"
					" for port %u, err msg: %s\n", portid,
					err.message);
		}
		rte_eth_dev_stop(portid);
		rte_eth_dev_close(portid);
		printf(" Done\n");
	}
	printf("Bye...\n");

	return 0;
}