/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <errno.h>
#include <signal.h>
#include <getopt.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_alarm.h>
#include <rte_telemetry.h>

#include "event_helper.h"
#include "flow.h"
#include "ipsec.h"
#include "ipsec_worker.h"
#include "parser.h"
#include "sad.h"
volatile bool force_quit;

#define MAX_JUMBO_PKT_LEN 9600

#define MEMPOOL_CACHE_SIZE 256

#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_CACHE_SZ 64
#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
/* Configurable number of RX/TX ring descriptors */
#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
		0, 0)

#define FRAG_TBL_BUCKET_ENTRIES 4
#define MAX_FRAG_TTL_NS (10LL * NS_PER_S)

#define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
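/*
 * Worked example (illustrative): with the default RTE_ETHER_MTU of 1500,
 * MTU_TO_FRAMELEN(1500) = 1500 + 14 (RTE_ETHER_HDR_LEN) + 4
 * (RTE_ETHER_CRC_LEN) = 1518 bytes, the classic maximum ethernet frame.
 */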
struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};

struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
#define CMD_LINE_OPT_CONFIG		"config"
#define CMD_LINE_OPT_SINGLE_SA		"single-sa"
#define CMD_LINE_OPT_CRYPTODEV_MASK	"cryptodev_mask"
#define CMD_LINE_OPT_TRANSFER_MODE	"transfer-mode"
#define CMD_LINE_OPT_SCHEDULE_TYPE	"event-schedule-type"
#define CMD_LINE_OPT_RX_OFFLOAD		"rxoffload"
#define CMD_LINE_OPT_TX_OFFLOAD		"txoffload"
#define CMD_LINE_OPT_REASSEMBLE		"reassemble"
#define CMD_LINE_OPT_MTU		"mtu"
#define CMD_LINE_OPT_FRAG_TTL		"frag-ttl"
#define CMD_LINE_OPT_EVENT_VECTOR	"event-vector"
#define CMD_LINE_OPT_VECTOR_SIZE	"vector-size"
#define CMD_LINE_OPT_VECTOR_TIMEOUT	"vector-tmo"
#define CMD_LINE_OPT_VECTOR_POOL_SZ	"vector-pool-sz"
#define CMD_LINE_OPT_PER_PORT_POOL	"per-port-pool"

#define CMD_LINE_ARG_EVENT	"event"
#define CMD_LINE_ARG_POLL	"poll"
#define CMD_LINE_ARG_ORDERED	"ordered"
#define CMD_LINE_ARG_ATOMIC	"atomic"
#define CMD_LINE_ARG_PARALLEL	"parallel"
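/*
 * Illustrative invocation (all values are examples only): poll mode on two
 * ports, port 0 unprotected, one RX queue per port:
 *
 *   ./dpdk-ipsec-secgw -l 1,2 -n 4 -- -p 0x3 -P -u 0x1 -f ep0.cfg \
 *	--config "(0,0,1),(1,0,2)" --transfer-mode poll
 */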
/* long options mapped to a short option */

/* first long only option value must be >= 256, so that we won't
 * conflict with short options
 */
enum {
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_SINGLE_SA_NUM,
	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
	CMD_LINE_OPT_TRANSFER_MODE_NUM,
	CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
	CMD_LINE_OPT_RX_OFFLOAD_NUM,
	CMD_LINE_OPT_TX_OFFLOAD_NUM,
	CMD_LINE_OPT_REASSEMBLE_NUM,
	CMD_LINE_OPT_MTU_NUM,
	CMD_LINE_OPT_FRAG_TTL_NUM,
	CMD_LINE_OPT_EVENT_VECTOR_NUM,
	CMD_LINE_OPT_VECTOR_SIZE_NUM,
	CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
	CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
	CMD_LINE_OPT_PER_PORT_POOL_NUM,
};
static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
	{CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
	{CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
	{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
	{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
	{CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
	{CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
	{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
	{CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
	{CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
	{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
	{NULL, 0, 0, 0}
};
uint32_t unprotected_port_mask;
uint32_t single_sa_idx;
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
uint32_t nb_bufs_in_pool;
uint32_t per_port_pool;
/*
 * RX/TX HW offload capabilities to enable/use on ethernet ports.
 * By default all capabilities are enabled.
 */
static uint64_t dev_rx_offload = UINT64_MAX;
static uint64_t dev_tx_offload = UINT64_MAX;

/*
 * global values that determine multi-seg policy
 */
static uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
static uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
static uint32_t stats_interval;
/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {
	.cache_sz = SA_CACHE_SZ,
};

static const char *cfgfile;
struct lcore_rx_queue {
	uint16_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
	uint16_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;

static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
struct buffer {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};

struct lcore_conf {
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
	struct {
		struct rte_ip_frag_tbl *tbl;
		struct rte_mempool *pool_indir;
		struct rte_ip_frag_death_row dr;
	} frag;
} __rte_cache_aligned;
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
				RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

struct socket_ctx socket_ctx[NB_SOCKETS];
/*
 * Determine if multi-segment support is required:
 * - either the frame buffer size is smaller than the MTU,
 * - or reassemble support is requested.
 */
static int
multi_seg_required(void)
{
	return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
		frame_buf_size || frag_tbl_sz != 0);
}
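/*
 * Illustrative arithmetic: with the defaults (mtu_size = RTE_ETHER_MTU,
 * frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE) a full frame plus headroom
 * fits into a single buffer and multi-seg support is not needed; a jumbo
 * MTU or a non-zero --reassemble table size makes this return non-zero.
 */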
static inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
	uint32_t l2_len)
{
	uint32_t plen, trim;

	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

static inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
	uint32_t l2_len)
{
	uint32_t plen, trim;

	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}
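/*
 * Example (illustrative): a minimum-size 60-byte ethernet frame carrying
 * a 40-byte IPv4 packet arrives padded; after the 14-byte L2 header is
 * stripped, pkt_len is 46 while total_length reports 40, so the 6 bytes
 * of link-layer padding are trimmed here.
 */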
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
/* Print out statistics on packet distribution */
static void
print_stats_cb(__rte_unused void *param)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	float burst_percent, rx_per_call, tx_per_call;
	unsigned int coreid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nCore statistics ====================================");

	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
		/* skip disabled cores */
		if (rte_lcore_is_enabled(coreid) == 0)
			continue;
		burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
					core_statistics[coreid].rx;
		rx_per_call = (float)(core_statistics[coreid].rx)/
				core_statistics[coreid].rx_call;
		tx_per_call = (float)(core_statistics[coreid].tx)/
				core_statistics[coreid].tx_call;
		printf("\nStatistics for core %u ------------------------------"
			"\nPackets received: %20"PRIu64
			"\nPackets sent: %24"PRIu64
			"\nPackets dropped: %21"PRIu64
			"\nBurst percent: %23.2f"
			"\nPackets per Rx call: %17.2f"
			"\nPackets per Tx call: %17.2f",
			coreid,
			core_statistics[coreid].rx,
			core_statistics[coreid].tx,
			core_statistics[coreid].dropped,
			burst_percent,
			rx_per_call,
			tx_per_call);

		total_packets_dropped += core_statistics[coreid].dropped;
		total_packets_tx += core_statistics[coreid].tx;
		total_packets_rx += core_statistics[coreid].rx;
	}
	printf("\nAggregate statistics ==============================="
		"\nTotal packets received: %14"PRIu64
		"\nTotal packets sent: %18"PRIu64
		"\nTotal packets dropped: %15"PRIu64,
		total_packets_rx,
		total_packets_tx,
		total_packets_dropped);
	printf("\n====================================================\n");

	rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	const struct rte_ether_hdr *eth;
	const struct rte_ipv4_hdr *iph4;
	const struct rte_ipv6_hdr *iph6;
	const struct rte_udp_hdr *udp;
	uint16_t ip4_hdr_len;
	uint16_t nat_port;

	eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {

		iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
			RTE_ETHER_HDR_LEN);
		adjust_ipv4_pktlen(pkt, iph4, 0);

		switch (iph4->next_proto_id) {
		case IPPROTO_ESP:
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
			break;
		case IPPROTO_UDP:
			if (app_sa_prm.udp_encap == 1) {
				ip4_hdr_len = ((iph4->version_ihl &
					RTE_IPV4_HDR_IHL_MASK) *
					RTE_IPV4_IHL_MULTIPLIER);
				udp = rte_pktmbuf_mtod_offset(pkt,
					struct rte_udp_hdr *, ip4_hdr_len);
				nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
				if (udp->src_port == nat_port ||
					udp->dst_port == nat_port) {
					t->ipsec.pkts[(t->ipsec.num)++] = pkt;
					pkt->packet_type |=
						MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
					break;
				}
			}
		/* Fall through */
		default:
			t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
		pkt->l3_len = sizeof(*iph4);
		pkt->packet_type |= RTE_PTYPE_L3_IPV4;
		if (pkt->packet_type & RTE_PTYPE_L4_TCP)
			pkt->l4_len = sizeof(struct rte_tcp_hdr);
		else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
			pkt->l4_len = sizeof(struct rte_udp_hdr);
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		int next_proto;
		size_t l3len, ext_len;
		uint8_t *p;

		/* get protocol type */
		iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
			RTE_ETHER_HDR_LEN);
		adjust_ipv6_pktlen(pkt, iph6, 0);

		next_proto = iph6->proto;

		/* determine l3 header size up to ESP extension */
		l3len = sizeof(struct ip6_hdr);
		p = rte_pktmbuf_mtod(pkt, uint8_t *);
		while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
			(next_proto = rte_ipv6_get_next_ext(p + l3len,
						next_proto, &ext_len)) >= 0)
			l3len += ext_len;

		/* drop packet when IPv6 header exceeds first segment length */
		if (unlikely(l3len > pkt->data_len)) {
			free_pkts(&pkt, 1);
			return;
		}

		switch (next_proto) {
		case IPPROTO_ESP:
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
			break;
		case IPPROTO_UDP:
			if (app_sa_prm.udp_encap == 1) {
				udp = rte_pktmbuf_mtod_offset(pkt,
					struct rte_udp_hdr *, l3len);
				nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
				if (udp->src_port == nat_port ||
					udp->dst_port == nat_port) {
					t->ipsec.pkts[(t->ipsec.num)++] = pkt;
					pkt->packet_type |=
						MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
					break;
				}
			}
		/* Fall through */
		default:
			t->ip6.data[t->ip6.num] = &iph6->proto;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}
		pkt->l3_len = l3len;
		pkt->packet_type |= RTE_PTYPE_L3_IPV6;
	} else {
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
			rte_be_to_cpu_16(eth->ether_type));
		free_pkts(&pkt, 1);
		return;
	}

	/* Check if the packet has been processed inline. For inline protocol
	 * processed packets, the metadata in the mbuf can be used to identify
	 * the security processing done on the packet. The metadata will be
	 * used to retrieve the application registered userdata associated
	 * with the security session.
	 */

	if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
			rte_security_dynfield_is_registered()) {
		struct ipsec_sa *sa;
		struct ipsec_mbuf_metadata *priv;
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
						rte_eth_dev_get_sec_ctx(
						pkt->port);

		/* Retrieve the userdata registered. Here, the userdata
		 * registered is the SA pointer.
		 */
		sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
				*rte_security_dynfield(pkt));
		if (sa == NULL) {
			/* userdata could not be retrieved */
			return;
		}

		/* Save SA as priv member in mbuf. This will be used in the
		 * IPsec selector(SP-SA) check.
		 */

		priv = get_priv(pkt);
		priv->sa = sa;
	}
}
static inline void
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
		uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(pkts[i], t);
	}
	/* Process any remaining packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
}
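/*
 * Note on the loop shape above: with PREFETCH_OFFSET == 3 the first loop
 * prefetches the header of packet i+3 while classifying packet i, hiding
 * memory latency; the tail loop handles the last PREFETCH_OFFSET packets
 * for which no lookahead is left.
 */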
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
		const struct lcore_conf *qconf)
{
	struct ip *ip;
	struct rte_ether_hdr *ethhdr;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	ethhdr = (struct rte_ether_hdr *)
		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= qconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ip->ip_sum = 0;

		/* calculate IPv4 cksum in SW */
		if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
			ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	} else {
		pkt->ol_flags |= qconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	}

	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
			sizeof(struct rte_ether_addr));
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
			sizeof(struct rte_ether_addr));
}
static inline void
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
		const struct lcore_conf *qconf)
{
	int32_t i;
	const int32_t prefetch_offset = 2;

	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
		prepare_tx_pkt(pkts[i], port, qconf);
	}
	/* Process any remaining packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port, qconf);
}
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	prepare_tx_burst(m_table, n, port, qconf);

	ret = rte_eth_tx_burst(port, queueid, m_table, n);

	core_stats_update_tx(ret);

	if (unlikely(ret < n)) {
		do {
			free_pkts(&m_table[ret], 1);
		} while (++ret < n);
	}

	return 0;
}
/*
 * Helper function to fragment and queue for TX one packet.
 */
static inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
	uint16_t port, uint8_t proto)
{
	struct buffer *tbl;
	uint32_t len, n;
	int32_t rc;

	tbl = qconf->tx_mbufs + port;
	len = tbl->len;

	/* free space for new fragments */
	if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
		send_burst(qconf, len, port);
		len = 0;
	}

	n = RTE_DIM(tbl->m_table) - len;

	if (proto == IPPROTO_IP)
		rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
			n, mtu_size, m->pool, qconf->frag.pool_indir);
	else
		rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
			n, mtu_size, m->pool, qconf->frag.pool_indir);

	if (rc >= 0)
		len += rc;
	else
		RTE_LOG(ERR, IPSEC,
			"%s: failed to fragment packet with size %u, "
			"error code: %d\n",
			__func__, m->pkt_len, rte_errno);

	free_pkts(&m, 1);
	return len;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;

	if (m->pkt_len <= mtu_size) {
		qconf->tx_mbufs[port].m_table[len] = m;
		len++;

	/* need to fragment the packet */
	} else if (frag_tbl_sz > 0)
		len = send_fragment_packet(qconf, m, port, proto);
	else
		free_pkts(&m, 1);

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
static inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim, struct ipsec_spd_stats *stats)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res == BYPASS) {
			ip->pkts[j++] = m;
			stats->bypass++;
			continue;
		}
		if (res == DISCARD) {
			free_pkts(&m, 1);
			stats->discard++;
			continue;
		}

		/* Only check SPI match for processed IPSec packets */
		if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
			free_pkts(&m, 1);
			stats->discard++;
			continue;
		}

		sa_idx = res - 1;
		if (!inbound_sa_check(sa, m, sa_idx)) {
			free_pkts(&m, 1);
			stats->discard++;
			continue;
		}
		ip->pkts[j++] = m;
		stats->protect++;
	}
	ip->num = j;
}
static inline void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i, n4, n6;
	struct ip *ip;
	struct rte_mbuf *m;

	n4 = trf->ip4.num;
	n6 = trf->ip6.num;

	for (i = 0; i < num; i++) {
		m = mb[i];
		ip = rte_pktmbuf_mtod(m, struct ip *);

		if (ip->ip_v == IPVERSION) {
			trf->ip4.pkts[n4] = m;
			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
					uint8_t *, offsetof(struct ip, ip_p));
			n4++;
		} else if (ip->ip_v == IP6_VERSION) {
			trf->ip6.pkts[n6] = m;
			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
					uint8_t *,
					offsetof(struct ip6_hdr, ip6_nxt));
			n6++;
		} else
			free_pkts(&m, 1);
	}

	trf->ip4.num = n4;
	trf->ip6.num = n6;
}
static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	unsigned int lcoreid = rte_lcore_id();
	uint16_t nb_pkts_in, n_ip4, n_ip6;

	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;

	if (app_sa_prm.enable == 0) {
		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);
		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
	} else {
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}

	inbound_sp_sa(ipsec_ctx->sp4_ctx,
		ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
		&core_statistics[lcoreid].inbound.spd4);

	inbound_sp_sa(ipsec_ctx->sp6_ctx,
		ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
		&core_statistics[lcoreid].inbound.spd6);
}
static inline void
outbound_spd_lookup(struct sp_ctx *sp,
		struct traffic_type *ip,
		struct traffic_type *ipsec,
		struct ipsec_spd_stats *stats)
{
	struct rte_mbuf *m;
	uint32_t i, j, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	for (i = 0, j = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] - 1;

		if (unlikely(ip->res[i] == DISCARD)) {
			free_pkts(&m, 1);
			stats->discard++;
		} else if (unlikely(ip->res[i] == BYPASS)) {
			ip->pkts[j++] = m;
			stats->bypass++;
		} else {
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;
			stats->protect++;
		}
	}
	ip->num = j;
}
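/*
 * Note: the ACL classification result doubles as the verdict encoding -
 * the reserved values DISCARD and BYPASS are handled explicitly, and any
 * other non-zero result is the matching SA index plus one (hence the
 * "- 1" above).
 */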
static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i;
	unsigned int lcoreid = rte_lcore_id();

	/* Drop any IPsec traffic from protected ports */
	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);

	traffic->ipsec.num = 0;

	outbound_spd_lookup(ipsec_ctx->sp4_ctx,
		&traffic->ip4, &traffic->ipsec,
		&core_statistics[lcoreid].outbound.spd4);

	outbound_spd_lookup(ipsec_ctx->sp6_ctx,
		&traffic->ip6, &traffic->ipsec,
		&core_statistics[lcoreid].outbound.spd6);

	if (app_sa_prm.enable == 0) {

		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
				MAX_PKT_BURST);

		for (i = 0; i < nb_pkts_out; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
			} else {
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
			}
		}
	} else {
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}
static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_in, i, idx;

	if (app_sa_prm.enable == 0) {

		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);

		for (i = 0; i < nb_pkts_in; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
			} else {
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
			}
		}
	} else {
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_out, i, n;
	struct ip *ip;

	/* Drop any IPsec traffic from protected ports */
	free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);

	n = 0;

	for (i = 0; i < traffic->ip4.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	}

	for (i = 0; i < traffic->ip6.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	}

	traffic->ip4.num = 0;
	traffic->ip6.num = 0;
	traffic->ipsec.num = n;

	if (app_sa_prm.enable == 0) {

		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
				MAX_PKT_BURST);

		/* They all use the same SA (ip4 or ip6 tunnel) */
		m = traffic->ipsec.pkts[0];
		ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			traffic->ip4.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
		} else {
			traffic->ip6.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
		}
	} else {
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}
static inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(pkt);

	sa = priv->sa;
	if (unlikely(sa == NULL)) {
		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
		goto fail;
	}

	if (is_ipv6)
		return sa->portid;

	/* else */
	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);

fail:
	if (is_ipv6)
		return -1;

	/* else */
	return 0;
}
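/*
 * Return value convention used by the routing code below: for IPv4 the
 * port is or-ed with RTE_LPM_LOOKUP_SUCCESS and 0 means failure, so the
 * result can be tested exactly like an rte_lpm hop; for IPv6 the bare
 * port is returned and -1 means failure, matching rte_lpm6 semantics.
 */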
static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	int32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;
	unsigned int lcoreid = rte_lcore_id();

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip, ip_dst);
			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
					uint32_t *, offset);
			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
			lpm_pkts++;
		}
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			core_statistics[lcoreid].lpm4.miss++;
			free_pkts(&pkts[i], 1);
			continue;
		}
		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
	}
}
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	int32_t hop[MAX_PKT_BURST * 2];
	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
	uint8_t *ip6_dst;
	int32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;
	unsigned int lcoreid = rte_lcore_id();

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip6_hdr, ip6_dst);
			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
					offset);
			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
			lpm_pkts++;
		}
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
			lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if (pkt_hop == -1) {
			core_statistics[lcoreid].lpm6.miss++;
			free_pkts(&pkts[i], 1);
			continue;
		}
		send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
	}
}
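/*
 * Note on the two-pass shape of route4_pkts()/route6_pkts() above: the
 * first pass collects destination addresses of only the non-inline
 * packets so that a single bulk LPM call can serve the whole burst; the
 * second pass walks the burst again in order, taking the hop either from
 * the SA (inline offload) or from the bulk lookup results.
 */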
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint16_t portid)
{
	struct ipsec_traffic traffic;

	prepare_traffic(pkts, &traffic, nb_pkts);

	if (unlikely(single_sa)) {
		if (is_unprotected_port(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (is_unprotected_port(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}
static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
	uint32_t i;
	struct ipsec_ctx *ctx;

	/* drain inbound buffers */
	ctx = &qconf->inbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	}

	/* drain outbound buffers */
	ctx = &qconf->outbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	}
}
static void
drain_inbound_crypto_queues(const struct lcore_conf *qconf,
		struct ipsec_ctx *ctx)
{
	uint32_t n;
	struct ipsec_traffic trf;
	unsigned int lcoreid = rte_lcore_id();

	if (app_sa_prm.enable == 0) {

		/* dequeue packets from crypto-queue */
		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_DIM(trf.ipsec.pkts));

		trf.ip4.num = 0;
		trf.ip6.num = 0;

		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
	} else
		ipsec_cqp_process(ctx, &trf);

	/* process ipv4 packets */
	if (trf.ip4.num != 0) {
		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
			&core_statistics[lcoreid].inbound.spd4);
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
	}

	/* process ipv6 packets */
	if (trf.ip6.num != 0) {
		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
			&core_statistics[lcoreid].inbound.spd6);
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
	}
}
static void
drain_outbound_crypto_queues(const struct lcore_conf *qconf,
		struct ipsec_ctx *ctx)
{
	uint32_t n;
	struct ipsec_traffic trf;

	if (app_sa_prm.enable == 0) {

		/* dequeue packets from crypto-queue */
		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_DIM(trf.ipsec.pkts));

		trf.ip4.num = 0;
		trf.ip6.num = 0;

		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
	} else
		ipsec_cqp_process(ctx, &trf);

	/* process ipv4 packets */
	if (trf.ip4.num != 0)
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);

	/* process ipv6 packets */
	if (trf.ip6.num != 0)
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
}
/* main processing loop */
void
ipsec_poll_mode_worker(void)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int32_t i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	int32_t rc, socket_id;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
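	/* Illustrative arithmetic: with a 2 GHz TSC, drain_tsc is
	 * 2e9 / 1e6 * 100 = 200000 cycles, so buffered TX packets and
	 * crypto-ops are flushed roughly every 100 us even at low rates.
	 */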
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;
	qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
	if (rc != 0) {
		RTE_LOG(ERR, IPSEC,
			"SAD cache init on lcore %u, failed with code: %d\n",
			lcore_id, rc);
		return;
	}

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			drain_crypto_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {

			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0) {
				core_stats_update_rx(nb_rx);
				process_pkts(qconf, pkts, nb_rx, portid);
			}

			/* dequeue and process completed crypto-ops */
			if (is_unprotected_port(portid))
				drain_inbound_crypto_queues(qconf,
					&qconf->inbound);
			else
				drain_outbound_crypto_queues(qconf,
					&qconf->outbound);
		}
	}
}
static int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
	uint16_t i;
	uint16_t portid;
	uint8_t queueid;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params_array[i].port_id;
		if (portid == fdir_portid) {
			queueid = lcore_params_array[i].queue_id;
			if (queueid == fdir_qid)
				break;
		}

		if (i == nb_lcore_params - 1)
			return -1;
	}

	return 1;
}
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
	uint8_t lcore;
	uint16_t portid;
	uint16_t i;
	int32_t socket_id;

	if (!eh_conf)
		return -EINVAL;

	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
		return 0;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_nb_rx_queues(const uint16_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
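/*
 * Example (illustrative): with --config "(0,0,1),(0,1,2)" the highest
 * queue id seen for port 0 is 1, so the function above returns 2; queue
 * ids are therefore expected to be contiguous and to start from 0.
 */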
static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	fprintf(stderr, "%s [EAL options] --"
		" -p PORTMASK"
		" [-P]"
		" -u PORTMASK"
		" [-j FRAMESIZE]"
		" [-l]"
		" [-w REPLAY_WINDOW_SIZE]"
		" [-e]"
		" [-a]"
		" [-c SAD_CACHE_SIZE]"
		" [-t STATS_INTERVAL]"
		" [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
		" -f CONFIG_FILE"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--single-sa SAIDX]"
		" [--cryptodev_mask MASK]"
		" [--transfer-mode MODE]"
		" [--event-schedule-type TYPE]"
		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
		" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
		" [--" CMD_LINE_OPT_MTU " MTU]"
		" [--" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS]"
		" [--event-vector]"
		" [--vector-size SIZE]"
		" [--vector-tmo TIMEOUT in ns]"
		" [--" CMD_LINE_OPT_VECTOR_POOL_SZ " SIZE]"
		" [--" CMD_LINE_OPT_PER_PORT_POOL "]"
		"\n\n"
		" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		" -P : Enable promiscuous mode\n"
		" -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
		" -j FRAMESIZE: Data buffer size, minimum (and default)\n"
		"     value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
		" -l enables code-path that uses librte_ipsec\n"
		" -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
		"     size for each SA\n"
		" -e enables ESN\n"
		" -a enables SA SQN atomic behaviour\n"
		" -c specifies inbound SAD cache size,\n"
		"     zero value disables the cache (default value: 128)\n"
		" -t specifies statistics screen update interval,\n"
		"     zero disables statistics screen (default value: 0)\n"
		" -s number of mbufs in packet pool, if not specified number\n"
		"     of mbufs will be calculated based on number of cores,\n"
		"     ports and crypto queues\n"
		" -f CONFIG_FILE: Configuration file\n"
		" --config (port,queue,lcore): Rx queue configuration. In poll\n"
		"                              mode determines which queues from\n"
		"                              which ports are mapped to which cores.\n"
		"                              In event mode this option is not used\n"
		"                              as packets are dynamically scheduled\n"
		"                              to cores by HW.\n"
		" --single-sa SAIDX: In poll mode use single SA index for\n"
		"                    outbound traffic, bypassing the SP\n"
		"                    In event mode selects driver submode,\n"
		"                    SA index value is ignored\n"
		" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
		"                        devices to configure\n"
		" --transfer-mode MODE\n"
		"               \"poll\"  : Packet transfer via polling (default)\n"
		"               \"event\" : Packet transfer via event device\n"
		" --event-schedule-type TYPE queue schedule type, used only when\n"
		"                            transfer mode is set to event\n"
		"               \"ordered\"  : Ordered (default)\n"
		"               \"atomic\"   : Atomic\n"
		"               \"parallel\" : Parallel\n"
		" --" CMD_LINE_OPT_RX_OFFLOAD
		": bitmask of the RX HW offload capabilities to enable/use\n"
		"                        (RTE_ETH_RX_OFFLOAD_*)\n"
		" --" CMD_LINE_OPT_TX_OFFLOAD
		": bitmask of the TX HW offload capabilities to enable/use\n"
		"                        (RTE_ETH_TX_OFFLOAD_*)\n"
		" --" CMD_LINE_OPT_REASSEMBLE " NUM"
		": max number of entries in reassemble(fragment) table\n"
		"     (zero (default value) disables reassembly)\n"
		" --" CMD_LINE_OPT_MTU " MTU"
		": MTU value on all ports (default value: 1500)\n"
		"     outgoing packets with bigger size will be fragmented\n"
		"     incoming packets with bigger size will be discarded\n"
		" --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
		": fragments lifetime in nanoseconds, default\n"
		"     and maximum value is 10.000.000.000 ns (10 s)\n"
		" --event-vector enables event vectorization\n"
		" --vector-size Max vector size (default value: 16)\n"
		" --vector-tmo Max vector timeout in nanoseconds"
		"     (default value: 102400)\n"
		" --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
		" --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
		"     (default value is based on mbuf count)\n"
		"\n",
		prgname);
}
static int
parse_mask(const char *str, uint64_t *val)
{
	char *end;
	unsigned long t;

	errno = 0;
	t = strtoul(str, &end, 0);
	if (errno != 0 || end[0] != 0)
		return -EINVAL;

	*val = t;
	return 0;
}
static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}
static int64_t
parse_decimal(const char *str)
{
	char *end = NULL;
	uint64_t num;

	num = strtoull(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
		|| num > INT64_MAX)
		return -1;

	return num;
}
static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
				_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
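/*
 * Example (illustrative): --config "(0,0,1),(1,0,2)" produces two
 * lcore_params entries: port 0 rx queue 0 polled by lcore 1, and port 1
 * rx queue 0 polled by lcore 2. Each field must fit in a byte (<= 255),
 * as checked above.
 */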
static void
print_app_sa_prm(const struct app_sa_prm *prm)
{
	printf("librte_ipsec usage: %s\n",
		(prm->enable == 0) ? "disabled" : "enabled");

	printf("replay window size: %u\n", prm->window_size);
	printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
	printf("SA flags: %#" PRIx64 "\n", prm->flags);
	printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
}
static int
parse_transfer_mode(struct eh_conf *conf, const char *optarg)
{
	if (!strcmp(CMD_LINE_ARG_POLL, optarg))
		conf->mode = EH_PKT_TRANSFER_MODE_POLL;
	else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
		conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
	else {
		printf("Unsupported packet transfer mode\n");
		return -EINVAL;
	}

	return 0;
}
static int
parse_schedule_type(struct eh_conf *conf, const char *optarg)
{
	struct eventmode_conf *em_conf = NULL;

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
	else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
	else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
	else {
		printf("Unsupported queue schedule type\n");
		return -EINVAL;
	}

	return 0;
}
static int32_t
parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
{
	int32_t opt;
	int64_t ret;
	char **argvopt;
	int32_t option_index;
	char *prgname = argv[0];
	int32_t f_present = 0;
	struct eventmode_conf *em_conf = NULL;

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;
		case 'u':
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'f':
			if (f_present == 1) {
				printf("\"-f\" option present more than "
					"once!\n");
				print_usage(prgname);
				return -1;
			}
			cfgfile = optarg;
			f_present = 1;
			break;

		case 's':
			ret = parse_decimal(optarg);
			if (ret < 0) {
				printf("Invalid number of buffers in a pool: "
					"%s\n", optarg);
				print_usage(prgname);
				return -1;
			}

			nb_bufs_in_pool = ret;
			break;

		case 'j':
			ret = parse_decimal(optarg);
			if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
					ret > UINT16_MAX) {
				printf("Invalid frame buffer size value: %s\n",
					optarg);
				print_usage(prgname);
				return -1;
			}
			frame_buf_size = ret;
			printf("Custom frame buffer size %u\n", frame_buf_size);
			break;
		case 'l':
			app_sa_prm.enable = 1;
			break;
		case 'w':
			app_sa_prm.window_size = parse_decimal(optarg);
			break;
		case 'e':
			app_sa_prm.enable_esn = 1;
			break;
		case 'a':
			app_sa_prm.enable = 1;
			app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
			break;
		case 'c':
			ret = parse_decimal(optarg);
			if (ret < 0) {
				printf("Invalid SA cache size: %s\n", optarg);
				print_usage(prgname);
				return -1;
			}
			app_sa_prm.cache_sz = ret;
			break;
		case 't':
			ret = parse_decimal(optarg);
			if (ret < 0) {
				printf("Invalid interval value: %s\n", optarg);
				print_usage(prgname);
				return -1;
			}
			stats_interval = ret;
			break;
		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
			if (ret) {
				printf("Invalid config\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case CMD_LINE_OPT_SINGLE_SA_NUM:
			ret = parse_decimal(optarg);
			if (ret == -1 || ret > UINT32_MAX) {
				printf("Invalid argument[sa_idx]\n");
				print_usage(prgname);
				return -1;
			}

			single_sa = 1;
			single_sa_idx = ret;
			eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
			printf("Configured with single SA index %u\n",
					single_sa_idx);
			break;
		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
			ret = parse_portmask(optarg);
			if (ret == -1) {
				printf("Invalid argument[portmask]\n");
				print_usage(prgname);
				return -1;
			}

			enabled_cryptodev_mask = ret;
			break;

		case CMD_LINE_OPT_TRANSFER_MODE_NUM:
			ret = parse_transfer_mode(eh_conf, optarg);
			if (ret < 0) {
				printf("Invalid packet transfer mode\n");
				print_usage(prgname);
				return -1;
			}
			break;

		case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
			ret = parse_schedule_type(eh_conf, optarg);
			if (ret < 0) {
				printf("Invalid queue schedule type\n");
				print_usage(prgname);
				return -1;
			}
			break;

		case CMD_LINE_OPT_RX_OFFLOAD_NUM:
			ret = parse_mask(optarg, &dev_rx_offload);
			if (ret != 0) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_RX_OFFLOAD, optarg);
				print_usage(prgname);
				return -1;
			}
			break;
		case CMD_LINE_OPT_TX_OFFLOAD_NUM:
			ret = parse_mask(optarg, &dev_tx_offload);
			if (ret != 0) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_TX_OFFLOAD, optarg);
				print_usage(prgname);
				return -1;
			}
			break;
		case CMD_LINE_OPT_REASSEMBLE_NUM:
			ret = parse_decimal(optarg);
			if (ret < 0 || ret > UINT32_MAX) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_REASSEMBLE, optarg);
				print_usage(prgname);
				return -1;
			}
			frag_tbl_sz = ret;
			break;
		case CMD_LINE_OPT_MTU_NUM:
			ret = parse_decimal(optarg);
			if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_MTU, optarg);
				print_usage(prgname);
				return -1;
			}
			mtu_size = ret;
			break;
		case CMD_LINE_OPT_FRAG_TTL_NUM:
			ret = parse_decimal(optarg);
			if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_FRAG_TTL, optarg);
				print_usage(prgname);
				return -1;
			}
			frag_ttl_ns = ret;
			break;
		case CMD_LINE_OPT_EVENT_VECTOR_NUM:
			em_conf = eh_conf->mode_params;
			em_conf->ext_params.event_vector = 1;
			break;
		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
			ret = parse_decimal(optarg);

			if (ret > MAX_PKT_BURST) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_VECTOR_SIZE, optarg);
				print_usage(prgname);
				return -1;
			}
			em_conf = eh_conf->mode_params;
			em_conf->ext_params.vector_size = ret;
			break;
		case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
			ret = parse_decimal(optarg);

			em_conf = eh_conf->mode_params;
			em_conf->vector_tmo_ns = ret;
			break;
		case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
			ret = parse_decimal(optarg);

			em_conf = eh_conf->mode_params;
			em_conf->vector_pool_sz = ret;
			break;
		case CMD_LINE_OPT_PER_PORT_POOL_NUM:
			per_port_pool = 1;
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (f_present == 0) {
		printf("Mandatory option \"-f\" not present\n");
		return -1;
	}

	/* check whether we need to enable multi-seg support */
	if (multi_seg_required()) {
		/* legacy mode doesn't support multi-seg */
		app_sa_prm.enable = 1;
		printf("frame buf size: %u, mtu: %u, "
			"number of reassemble entries: %u\n"
			"multi-segment support is required\n",
			frame_buf_size, mtu_size, frag_tbl_sz);
	}

	print_app_sa_prm(&app_sa_prm);

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/*
 * Update destination ethaddr for the port.
 */
int
add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
{
	if (port >= RTE_DIM(ethaddr_tbl))
		return -EINVAL;

	ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
	return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
					link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int32_t
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth,
		const struct rte_cryptodev_capabilities *aead)
{
	int32_t ret = 0;
	unsigned long i;
	struct cdev_key key = { 0 };

	key.lcore_id = params->lcore_id;
	if (cipher)
		key.cipher_algo = cipher->sym.cipher.algo;
	if (auth)
		key.auth_algo = auth->sym.auth.algo;
	if (aead)
		key.aead_algo = aead->sym.aead.algo;

	ret = rte_hash_lookup(map, &key);
	if (ret != -ENOENT)
		return 0;

	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
			break;

	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
			return 0;
		}
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
				"(cdev_id_qp %lu)\n", str, key.lcore_id,
				cdev_id, qp, i);
	}

	ret = rte_hash_add_key_data(map, &key, (void *)i);
	if (ret < 0) {
		printf("Failed to insert cdev mapping for (lcore %u, "
				"cdev %u, qp %u), errno %d\n",
				key.lcore_id, ipsec_ctx->tbl[i].id,
				ipsec_ctx->tbl[i].qp, ret);
		return 0;
	}

	return 1;
}
static int32_t
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
{
	int32_t ret = 0;
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	const char *str;

	qconf = &lcore_conf[params->lcore_id];

	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		map = cdev_map_out;
		ipsec_ctx = &qconf->outbound;
		str = "Outbound";
	} else {
		map = cdev_map_in;
		ipsec_ctx = &qconf->inbound;
		str = "Inbound";
	}

	/* Required cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
		return ret;

	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, NULL, NULL, i);
			continue;
		}

		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
				continue;

			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
				continue;

			ret |= add_mapping(map, str, cdev_id, qp, params,
						ipsec_ctx, i, j, NULL);
		}
	}

	return ret;
}
/* Check if the device is enabled by cryptodev_mask */
static int32_t
check_cryptodev_mask(uint8_t cdev_id)
{
	if (enabled_cryptodev_mask & (UINT64_C(1) << cdev_id))
		return 0;

	return -1;
}
static uint16_t
cryptodevs_init(uint16_t req_queue_num)
{
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
	int16_t cdev_id;
	struct rte_hash_parameters params = { 0 };

	const uint64_t mseg_flag = multi_seg_required() ?
				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;

	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();

	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	printf("lcore/cryptodev/qp mappings:\n");

	idx = 0;
	total_nb_qps = 0;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		struct rte_cryptodev_info cdev_info;

		if (check_cryptodev_mask((uint8_t)cdev_id))
			continue;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
			rte_exit(EXIT_FAILURE,
				"Device %hd does not support \'%s\' feature\n",
				cdev_id,
				rte_cryptodev_get_feature_name(mseg_flag));

		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
			max_nb_qps = cdev_info.max_nb_queue_pairs;
		else
			max_nb_qps = nb_lcore_params;

		qp = 0;
		i = 0;
		while (qp < max_nb_qps && i < nb_lcore_params) {
			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
						&lcore_params[idx]))
				qp++;
			idx++;
			idx = idx % nb_lcore_params;
			i++;
		}

		qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
		if (qp == 0)
			continue;

		total_nb_qps += qp;
		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
		dev_conf.nb_queue_pairs = qp;
		dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;

		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
		if (dev_max_sess != 0 &&
				dev_max_sess < get_nb_crypto_sessions())
			rte_exit(EXIT_FAILURE,
				"Device does not support at least %u "
				"sessions", get_nb_crypto_sessions());

		if (rte_cryptodev_configure(cdev_id, &dev_conf))
			rte_panic("Failed to initialize cryptodev %u\n",
					cdev_id);

		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
		qp_conf.mp_session =
			socket_ctx[dev_conf.socket_id].session_pool;
		qp_conf.mp_session_private =
			socket_ctx[dev_conf.socket_id].session_priv_pool;
		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
					&qp_conf, dev_conf.socket_id))
				rte_panic("Failed to setup queue %u for "
						"cdev_id %u\n", qp, cdev_id);

		if (rte_cryptodev_start(cdev_id))
			rte_panic("Failed to start cryptodev %u\n",
					cdev_id);
	}

	return total_nb_qps;
}
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct rte_ether_addr ethaddr;
	struct rte_eth_conf local_port_conf = port_conf;

	ret = rte_eth_dev_info_get(portid, &dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-ret));

	/* limit allowed HW offloads, as user requested */
	dev_info.rx_offload_capa &= dev_rx_offload;
	dev_info.tx_offload_capa &= dev_tx_offload;

	printf("Configuring device port %u:\n", portid);

	ret = rte_eth_macaddr_get(portid, &ethaddr);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error getting MAC address (port %u): %s\n",
			portid, rte_strerror(-ret));

	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	printf("\n");

	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;

	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max rx queue is %u)\n",
				nb_rx_queue, dev_info.max_rx_queues);

	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max tx queue is %u)\n",
				nb_tx_queue, dev_info.max_tx_queues);

	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
			nb_rx_queue, nb_tx_queue);

	local_port_conf.rxmode.mtu = mtu_size;

	if (multi_seg_required()) {
		local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	}

	local_port_conf.rxmode.offloads |= req_rx_offloads;
	local_port_conf.txmode.offloads |= req_tx_offloads;

	/* Check that all required capabilities are supported */
	if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
			local_port_conf.rxmode.offloads)
		rte_exit(EXIT_FAILURE,
			"Error: port %u required RX offloads: 0x%" PRIx64
			", available RX offloads: 0x%" PRIx64 "\n",
			portid, local_port_conf.rxmode.offloads,
			dev_info.rx_offload_capa);

	if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
			local_port_conf.txmode.offloads)
		rte_exit(EXIT_FAILURE,
			"Error: port %u required TX offloads: 0x%" PRIx64
			", available TX offloads: 0x%" PRIx64 "\n",
			portid, local_port_conf.txmode.offloads,
			dev_info.tx_offload_capa);

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;

	printf("port %u configuring rx_offloads=0x%" PRIx64
		", tx_offloads=0x%" PRIx64 "\n",
		portid, local_port_conf.rxmode.offloads,
		local_port_conf.txmode.offloads);

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support,"
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			portid,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
			&local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n", ret, portid);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
				"err=%d, port=%d\n", ret, portid);

	/* init one TX queue per lcore */
	tx_queueid = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		/* init TX queue */
		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

		txconf = &dev_info.default_txconf;
		txconf->offloads = local_port_conf.txmode.offloads;

		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
				socket_id, txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = tx_queueid;

		/* Pre-populate pkt offloads based on capabilities */
		qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
		qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
		if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
			qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;

		tx_queueid++;

		/* init RX queues */
		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
			struct rte_eth_rxconf rxq_conf;
			struct rte_mempool *pool;

			if (portid != qconf->rx_queue_list[queue].port_id)
				continue;

			rx_queueid = qconf->rx_queue_list[queue].queue_id;

			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
					socket_id);

			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = local_port_conf.rxmode.offloads;

			if (per_port_pool)
				pool = socket_ctx[socket_id].mbuf_pool[portid];
			else
				pool = socket_ctx[socket_id].mbuf_pool[0];

			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
					nb_rxd, socket_id, &rxq_conf, pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");
}
static size_t
max_session_size(void)
{
	size_t max_sz, sz;
	void *sec_ctx;
	int16_t cdev_id, port_id, n;

	max_sz = 0;
	n = rte_cryptodev_count();
	for (cdev_id = 0; cdev_id != n; cdev_id++) {
		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sz > max_sz)
			max_sz = sz;
		/*
		 * If the crypto device is security capable, the size of the
		 * security session needs to be checked as well.
		 */

		/* Get security context of the crypto device */
		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
		if (sec_ctx == NULL)
			continue;

		/* Get size of security session */
		sz = rte_security_session_get_size(sec_ctx);
		if (sz > max_sz)
			max_sz = sz;
	}

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((enabled_port_mask & (1 << port_id)) == 0)
			continue;

		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
		if (sec_ctx == NULL)
			continue;

		sz = rte_security_session_get_size(sec_ctx);
		if (sz > max_sz)
			max_sz = sz;
	}

	return max_sz;
}

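/*
 * Worked sizing example (illustrative only): with CDEV_MP_CACHE_SZ = 64 and
 * CDEV_MP_CACHE_MULTIPLIER = 1.5, the session pools created below never
 * hold fewer than 64 * 1.5 = 96 objects, even when the SA configuration
 * needs less; otherwise the per-lcore mempool caches alone could drain the
 * pool.
 */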
static void
session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;
	uint32_t nb_sess;

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);
	nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
		rte_lcore_count());
	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
			CDEV_MP_CACHE_MULTIPLIER);
	sess_mp = rte_cryptodev_sym_session_pool_create(
			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
			socket_id);
	ctx->session_pool = sess_mp;

	if (ctx->session_pool == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot init session pool on socket %d\n", socket_id);
	printf("Allocated session pool on socket %d\n", socket_id);
}

static void
session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
	size_t sess_sz)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;
	uint32_t nb_sess;

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_priv_%u", socket_id);
	nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
		rte_lcore_count());
	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
			CDEV_MP_CACHE_MULTIPLIER);
	sess_mp = rte_mempool_create(mp_name,
			nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
			0, NULL, NULL, NULL,
			NULL, socket_id, 0);
	ctx->session_priv_pool = sess_mp;

	if (ctx->session_priv_pool == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot init session priv pool on socket %d\n",
			socket_id);
	printf("Allocated session priv pool on socket %d\n",
		socket_id);
}

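/*
 * Note: the pool above is a plain rte_mempool rather than a symmetric
 * session pool because it backs only the driver-private session data;
 * session handles themselves come from the pool created in
 * session_pool_init().
 */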
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
	uint32_t nb_mbuf)
{
	char s[64];
	int32_t ms;

	/* mbuf_pool may already have been created by an earlier call */
	if (socket_ctx[socket_id].mbuf_pool[portid])
		return;

	snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
	ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE,
			ipsec_metadata_size(),
			frame_buf_size, socket_id);

	/*
	 * if multi-segment support is enabled, then create a pool
	 * for indirect mbufs. This is not per-port but global.
	 */
	ms = multi_seg_required();
	if (ms != 0 && !ctx->mbuf_pool_indir) {
		snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
		ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
	}

	if (ctx->mbuf_pool[portid] == NULL ||
	    (ms != 0 && ctx->mbuf_pool_indir == NULL))
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
				socket_id);
	printf("Allocated mbuf pool on socket %d\n", socket_id);
}

static inline int
inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
{
	struct ipsec_sa *sa;

	/* For inline protocol processing, the metadata in the event will
	 * uniquely identify the security session which raised the event.
	 * Application would then need the userdata it had registered with the
	 * security session to process the event.
	 */
	sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
	if (sa == NULL) {
		/* userdata could not be retrieved */
		return -1;
	}

	/* Sequence number overflow. The SA needs to be re-established. */
	RTE_SET_USED(sa);

	return 0;
}

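/*
 * For the rte_security_get_userdata() lookup above to succeed, the SA
 * pointer must have been registered when the security session was created.
 * A minimal sketch (illustrative only, not the exact code used elsewhere
 * in this application):
 *
 *	struct rte_security_session_conf conf = { ... };
 *
 *	conf.userdata = (void *)sa;
 *	sess = rte_security_session_create(sec_ctx, &conf,
 *			session_pool, session_priv_pool);
 */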
static int
inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param)
{
	uint64_t md;
	struct rte_eth_event_ipsec_desc *event_desc = NULL;
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(port_id);

	RTE_SET_USED(param);

	if (type != RTE_ETH_EVENT_IPSEC)
		return -1;

	event_desc = ret_param;
	if (event_desc == NULL) {
		printf("Event descriptor not set\n");
		return -1;
	}

	md = event_desc->metadata;

	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
		return inline_ipsec_event_esn_overflow(ctx, md);
	else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
		printf("Invalid IPsec event reported\n");
		return -1;
	}

	return -1;
}

static int
ethdev_reset_event_callback(uint16_t port_id,
		enum rte_eth_event_type type,
		void *param __rte_unused, void *ret_param __rte_unused)
{
	printf("Reset Event on port id %d type %d\n", port_id, type);
	printf("Force quit application\n");
	force_quit = true;
	return 0;
}

static uint16_t
rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	uint64_t tm;
	uint32_t i, k;
	struct lcore_conf *lc;
	struct rte_mbuf *mb;
	struct rte_ether_hdr *eth;

	lc = user_param;
	k = 0;
	tm = 0;

	for (i = 0; i != nb_pkts; i++) {

		mb = pkt[i];
		eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
		if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {

			struct rte_ipv4_hdr *iph;

			iph = (struct rte_ipv4_hdr *)(eth + 1);
			if (rte_ipv4_frag_pkt_is_fragmented(iph)) {

				mb->l2_len = sizeof(*eth);
				mb->l3_len = sizeof(*iph);
				tm = (tm != 0) ? tm : rte_rdtsc();
				mb = rte_ipv4_frag_reassemble_packet(
					lc->frag.tbl, &lc->frag.dr,
					mb, tm, iph);

				if (mb != NULL) {
					/* fix ip cksum after reassemble. */
					iph = rte_pktmbuf_mtod_offset(mb,
						struct rte_ipv4_hdr *,
						mb->l2_len);
					iph->hdr_checksum = 0;
					iph->hdr_checksum = rte_ipv4_cksum(iph);
				}
			}
		} else if (eth->ether_type ==
				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {

			struct rte_ipv6_hdr *iph;
			struct rte_ipv6_fragment_ext *fh;

			iph = (struct rte_ipv6_hdr *)(eth + 1);
			fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
			if (fh != NULL) {
				mb->l2_len = sizeof(*eth);
				mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
					sizeof(*fh);
				tm = (tm != 0) ? tm : rte_rdtsc();
				mb = rte_ipv6_frag_reassemble_packet(
					lc->frag.tbl, &lc->frag.dr,
					mb, tm, iph, fh);
				if (mb != NULL)
					/* fix l3_len after reassemble. */
					mb->l3_len = mb->l3_len - sizeof(*fh);
			}
		}

		pkt[k] = mb;
		if (mb != NULL)
			k++;
	}

	/* some fragments were encountered, drain death row */
	if (tm != 0)
		rte_ip_frag_free_death_row(&lc->frag.dr, 0);

	return k;
}

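/*
 * Note that rx_callback() compacts the burst: fragments consumed by the
 * reassembly table (those still waiting for their peers) yield a NULL mbuf,
 * so the returned count k may be smaller than nb_pkts and only the first k
 * entries of pkt[] are valid for the caller.
 */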
static int
reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
{
	int32_t sid;
	uint32_t i;
	uint64_t frag_cycles;
	const struct lcore_rx_queue *rxq;
	const struct rte_eth_rxtx_callback *cb;

	/* create fragment table */
	sid = rte_lcore_to_socket_id(cid);
	frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
		NS_PER_S * frag_ttl_ns;

	lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
		FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
	if (lc->frag.tbl == NULL) {
		printf("%s(%u): failed to create fragment table of size: %u, "
			"error code: %d\n",
			__func__, cid, frag_tbl_sz, rte_errno);
		return -ENOMEM;
	}

	/* setup reassemble RX callbacks for all queues */
	for (i = 0; i != lc->nb_rx_queue; i++) {

		rxq = lc->rx_queue_list + i;
		cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
			rx_callback, lc);
		if (cb == NULL) {
			printf("%s(%u): failed to install RX callback for "
				"portid=%u, queueid=%u, error code: %d\n",
				__func__, cid,
				rxq->port_id, rxq->queue_id, rte_errno);
			return -ENOMEM;
		}
	}

	return 0;
}

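/*
 * Worked conversion example (illustrative): the fragment TTL is given in
 * nanoseconds, but rte_ip_frag_table_create() expects TSC cycles, hence
 * frag_cycles = ceil(tsc_hz / NS_PER_S) * frag_ttl_ns. On a 2 GHz TSC with
 * the default 10 s TTL (MAX_FRAG_TTL_NS) this works out to 2 * 10^10 cycles.
 */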
static int
reassemble_init(void)
{
	int32_t rc;
	uint32_t i, lc;

	rc = 0;
	for (i = 0; i != nb_lcore_params; i++) {
		lc = lcore_params[i].lcore_id;
		rc = reassemble_lcore_init(lcore_conf + lc, lc);
		if (rc != 0)
			break;
	}

	return rc;
}

static void
create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
{
	struct rte_flow_action action[2];
	struct rte_flow_item pattern[2];
	struct rte_flow_attr attr = {0};
	struct rte_flow_error err;
	struct rte_flow *flow;
	int ret;

	if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
		return;

	/* Add the default rte_flow to enable SECURITY for all ESP packets */

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
	pattern[0].spec = NULL;
	pattern[0].mask = NULL;
	pattern[0].last = NULL;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
	action[0].conf = NULL;
	action[1].type = RTE_FLOW_ACTION_TYPE_END;
	action[1].conf = NULL;

	attr.ingress = 1;

	ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
	if (ret)
		return;

	flow = rte_flow_create(port_id, &attr, pattern, action, &err);
	if (flow == NULL)
		return;

	flow_info_tbl[port_id].rx_def_flow = flow;
	RTE_LOG(INFO, IPSEC,
		"Created default flow enabling SECURITY for all ESP traffic on port %d\n",
		port_id);
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		force_quit = true;
	}
}

static void
ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
{
	struct rte_ipsec_session *ips;
	int i;

	if (!sa || !nb_sa)
		return;

	for (i = 0; i < nb_sa; i++) {
		ips = ipsec_get_primary_session(&sa[i]);
		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
			rte_exit(EXIT_FAILURE, "Event mode supports only "
				"inline protocol sessions\n");
	}
}

static int32_t
check_event_mode_params(struct eh_conf *eh_conf)
{
	struct eventmode_conf *em_conf = NULL;
	struct lcore_params *params;
	uint16_t portid;

	if (!eh_conf || !eh_conf->mode_params)
		return -EINVAL;

	/* Get eventmode conf */
	em_conf = eh_conf->mode_params;

	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
	    em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
		printf("error: option --event-schedule-type applies only to "
			"event mode\n");
		return -EINVAL;
	}

	if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	/* Set schedule type to ORDERED if it wasn't explicitly set by user */
	if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;

	/*
	 * Event mode currently supports only inline protocol sessions.
	 * If any other session type is configured, exit with an error.
	 */
	ev_mode_sess_verify(sa_in, nb_sa_in);
	ev_mode_sess_verify(sa_out, nb_sa_out);

	/* Option --config does not apply to event mode */
	if (nb_lcore_params > 0) {
		printf("error: option --config applies only to poll mode\n");
		return -EINVAL;
	}

	/*
	 * In order to use the same port_init routine for both poll and event
	 * modes, initialize lcore_params with one queue for each eth port.
	 */
	lcore_params = lcore_params_array;
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		params = &lcore_params[nb_lcore_params++];
		params->port_id = portid;
		params->queue_id = 0;
		params->lcore_id = rte_get_next_lcore(0, 0, 1);
	}

	return 0;
}

static void
inline_sessions_free(struct sa_ctx *sa_ctx)
{
	struct rte_ipsec_session *ips;
	struct ipsec_sa *sa;
	int32_t ret;
	uint32_t i;

	if (!sa_ctx)
		return;

	for (i = 0; i < sa_ctx->nb_sa; i++) {

		sa = &sa_ctx->sa[i];
		if (!sa->spi)
			continue;

		ips = ipsec_get_primary_session(sa);
		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
		    ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
			continue;

		if (!rte_eth_dev_is_valid_port(sa->portid))
			continue;

		ret = rte_security_session_destroy(
				rte_eth_dev_get_sec_ctx(sa->portid),
				ips->security.ses);
		if (ret)
			RTE_LOG(ERR, IPSEC, "Failed to destroy security "
				"session type %d, spi %d\n",
				ips->type, sa->spi);
	}
}

static uint32_t
calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
		uint32_t nb_txq)
{
	return RTE_MAX((nb_rxq * nb_rxd +
			nb_ports * nb_lcores * MAX_PKT_BURST +
			nb_ports * nb_txq * nb_txd +
			nb_lcores * MEMPOOL_CACHE_SIZE +
			nb_crypto_qp * CDEV_QUEUE_DESC +
			nb_lcores * frag_tbl_sz *
			FRAG_TBL_BUCKET_ENTRIES),
		8192U);
}

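/*
 * Worked example (illustrative, assuming MAX_PKT_BURST is 32): with 2 ports,
 * 4 lcores, 2 RX queues, 4 TX queues, the default 1024 RX/TX descriptors,
 * no crypto queue pairs and reassembly disabled, this yields
 * 2048 + 256 + 8192 + 1024 = 11520 mbufs, above the 8192 floor.
 */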
static int
handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *data)
{
	uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
	unsigned int coreid;

	rte_tel_data_start_dict(data);

	if (params) {
		coreid = (uint32_t)atoi(params);
		if (rte_lcore_is_enabled(coreid) == 0)
			return -EINVAL;

		total_pkts_dropped = core_statistics[coreid].dropped;
		total_pkts_tx = core_statistics[coreid].tx;
		total_pkts_rx = core_statistics[coreid].rx;
	} else {
		for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {

			/* skip disabled cores */
			if (rte_lcore_is_enabled(coreid) == 0)
				continue;

			total_pkts_dropped += core_statistics[coreid].dropped;
			total_pkts_tx += core_statistics[coreid].tx;
			total_pkts_rx += core_statistics[coreid].rx;
		}
	}

	/* add telemetry key/values pairs */
	rte_tel_data_add_dict_u64(data, "packets received",
		total_pkts_rx);

	rte_tel_data_add_dict_u64(data, "packets transmitted",
		total_pkts_tx);

	rte_tel_data_add_dict_u64(data, "packets dropped",
		total_pkts_dropped);

	return 0;
}

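/*
 * Example query (illustrative), using the standard DPDK telemetry client:
 *
 *	$ usertools/dpdk-telemetry.py
 *	--> /examples/ipsec-secgw/stats,2
 *
 * returns the rx/tx/dropped counters of lcore 2 as a JSON dictionary;
 * omitting the parameter aggregates over all enabled lcores.
 */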
static void
update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
{
	struct ipsec_core_statistics *lcore_stats;

	/* skip disabled cores */
	if (rte_lcore_is_enabled(coreid) == 0)
		return;

	lcore_stats = &core_statistics[coreid];

	total->rx += lcore_stats->rx;
	total->dropped += lcore_stats->dropped;
	total->tx += lcore_stats->tx;

	/* outbound stats */
	total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
	total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
	total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;

	total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
	total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
	total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;

	total->outbound.sad.miss += lcore_stats->outbound.sad.miss;

	/* inbound stats */
	total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
	total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
	total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;

	total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
	total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
	total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;

	total->inbound.sad.miss += lcore_stats->inbound.sad.miss;

	/* routing stats */
	total->lpm4.miss += lcore_stats->lpm4.miss;
	total->lpm6.miss += lcore_stats->lpm6.miss;
}

static void
update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
{
	memset(total, 0, sizeof(*total));

	if (coreid != UINT32_MAX) {
		update_lcore_statistics(total, coreid);
	} else {
		for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
			update_lcore_statistics(total, coreid);
	}
}

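/*
 * UINT32_MAX acts as a sentinel core id: the telemetry handlers below pass
 * it when no core parameter was supplied, turning the query into an
 * aggregate over every enabled lcore.
 */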
static int
handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *data)
{
	struct ipsec_core_statistics total_stats;

	struct rte_tel_data *spd4_data = rte_tel_data_alloc();
	struct rte_tel_data *spd6_data = rte_tel_data_alloc();
	struct rte_tel_data *sad_data = rte_tel_data_alloc();
	unsigned int coreid = UINT32_MAX;
	int rc = 0;

	/* verify allocated telemetry data structures */
	if (!spd4_data || !spd6_data || !sad_data) {
		rc = -ENOMEM;
		goto exit;
	}

	/* initialize telemetry data structs as dicts */
	rte_tel_data_start_dict(data);

	rte_tel_data_start_dict(spd4_data);
	rte_tel_data_start_dict(spd6_data);
	rte_tel_data_start_dict(sad_data);

	if (params) {
		coreid = (uint32_t)atoi(params);
		if (rte_lcore_is_enabled(coreid) == 0) {
			rc = -EINVAL;
			goto exit;
		}
	}

	update_statistics(&total_stats, coreid);

	/* add spd 4 telemetry key/values pairs */

	rte_tel_data_add_dict_u64(spd4_data, "protect",
		total_stats.outbound.spd4.protect);
	rte_tel_data_add_dict_u64(spd4_data, "bypass",
		total_stats.outbound.spd4.bypass);
	rte_tel_data_add_dict_u64(spd4_data, "discard",
		total_stats.outbound.spd4.discard);

	rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);

	/* add spd 6 telemetry key/values pairs */

	rte_tel_data_add_dict_u64(spd6_data, "protect",
		total_stats.outbound.spd6.protect);
	rte_tel_data_add_dict_u64(spd6_data, "bypass",
		total_stats.outbound.spd6.bypass);
	rte_tel_data_add_dict_u64(spd6_data, "discard",
		total_stats.outbound.spd6.discard);

	rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);

	/* add sad telemetry key/values pairs */

	rte_tel_data_add_dict_u64(sad_data, "miss",
		total_stats.outbound.sad.miss);

	rte_tel_data_add_dict_container(data, "sad", sad_data, 0);

	return 0;

exit:
	rte_tel_data_free(spd4_data);
	rte_tel_data_free(spd6_data);
	rte_tel_data_free(sad_data);
	return rc;
}

static int
handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *data)
{
	struct ipsec_core_statistics total_stats;

	struct rte_tel_data *spd4_data = rte_tel_data_alloc();
	struct rte_tel_data *spd6_data = rte_tel_data_alloc();
	struct rte_tel_data *sad_data = rte_tel_data_alloc();
	unsigned int coreid = UINT32_MAX;
	int rc = 0;

	/* verify allocated telemetry data structures */
	if (!spd4_data || !spd6_data || !sad_data) {
		rc = -ENOMEM;
		goto exit;
	}

	/* initialize telemetry data structs as dicts */
	rte_tel_data_start_dict(data);
	rte_tel_data_start_dict(spd4_data);
	rte_tel_data_start_dict(spd6_data);
	rte_tel_data_start_dict(sad_data);

	if (params) {
		coreid = (uint32_t)atoi(params);
		if (rte_lcore_is_enabled(coreid) == 0) {
			rc = -EINVAL;
			goto exit;
		}
	}

	update_statistics(&total_stats, coreid);

	/* add sad telemetry key/values pairs */

	rte_tel_data_add_dict_u64(sad_data, "miss",
		total_stats.inbound.sad.miss);

	rte_tel_data_add_dict_container(data, "sad", sad_data, 0);

	/* add spd 4 telemetry key/values pairs */

	rte_tel_data_add_dict_u64(spd4_data, "protect",
		total_stats.inbound.spd4.protect);
	rte_tel_data_add_dict_u64(spd4_data, "bypass",
		total_stats.inbound.spd4.bypass);
	rte_tel_data_add_dict_u64(spd4_data, "discard",
		total_stats.inbound.spd4.discard);

	rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);

	/* add spd 6 telemetry key/values pairs */

	rte_tel_data_add_dict_u64(spd6_data, "protect",
		total_stats.inbound.spd6.protect);
	rte_tel_data_add_dict_u64(spd6_data, "bypass",
		total_stats.inbound.spd6.bypass);
	rte_tel_data_add_dict_u64(spd6_data, "discard",
		total_stats.inbound.spd6.discard);

	rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);

	return 0;

exit:
	rte_tel_data_free(spd4_data);
	rte_tel_data_free(spd6_data);
	rte_tel_data_free(sad_data);
	return rc;
}

static int
handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *data)
{
	struct ipsec_core_statistics total_stats;

	struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
	struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
	unsigned int coreid = UINT32_MAX;
	int rc = 0;

	/* verify allocated telemetry data structures */
	if (!lpm4_data || !lpm6_data) {
		rc = -ENOMEM;
		goto exit;
	}

	/* initialize telemetry data structs as dicts */
	rte_tel_data_start_dict(data);
	rte_tel_data_start_dict(lpm4_data);
	rte_tel_data_start_dict(lpm6_data);

	if (params) {
		coreid = (uint32_t)atoi(params);
		if (rte_lcore_is_enabled(coreid) == 0) {
			rc = -EINVAL;
			goto exit;
		}
	}

	update_statistics(&total_stats, coreid);

	/* add lpm 4 telemetry key/values pairs */
	rte_tel_data_add_dict_u64(lpm4_data, "miss",
		total_stats.lpm4.miss);

	rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);

	/* add lpm 6 telemetry key/values pairs */
	rte_tel_data_add_dict_u64(lpm6_data, "miss",
		total_stats.lpm6.miss);

	rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);

	return 0;

exit:
	rte_tel_data_free(lpm4_data);
	rte_tel_data_free(lpm6_data);
	return rc;
}

static void
ipsec_secgw_telemetry_init(void)
{
	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
		handle_telemetry_cmd_ipsec_secgw_stats,
		"Returns global stats. "
		"Optional Parameters: int <logical core id>");

	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
		handle_telemetry_cmd_ipsec_secgw_stats_outbound,
		"Returns outbound global stats. "
		"Optional Parameters: int <logical core id>");

	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
		handle_telemetry_cmd_ipsec_secgw_stats_inbound,
		"Returns inbound global stats. "
		"Optional Parameters: int <logical core id>");

	rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
		handle_telemetry_cmd_ipsec_secgw_stats_routing,
		"Returns routing stats. "
		"Optional Parameters: int <logical core id>");
}

int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id, nb_txq, nb_rxq = 0;
	uint32_t cdev_id;
	uint32_t i;
	uint8_t socket_id;
	uint16_t portid, nb_crypto_qp, nb_ports = 0;
	uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
	uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
	struct eh_conf *eh_conf = NULL;
	size_t sess_sz;

	nb_bufs_in_pool = 0;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	force_quit = false;
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* initialize event helper configuration */
	eh_conf = eh_conf_init();
	if (eh_conf == NULL)
		rte_exit(EXIT_FAILURE, "Failed to init event helper config\n");

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv, eh_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	ipsec_secgw_telemetry_init();

	/* parse configuration file */
	if (parse_cfg_file(cfgfile) < 0) {
		printf("parsing file \"%s\" failed\n",
			cfgfile);
		print_usage(argv[0]);
		exit(EXIT_FAILURE);
	}

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
				unprotected_port_mask);

	if (unprotected_port_mask && !nb_sa_in)
		rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");

	if (check_poll_mode_params(eh_conf) < 0)
		rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");

	if (check_event_mode_params(eh_conf) < 0)
		rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	sess_sz = max_session_size();

	/*
	 * In event mode request minimum number of crypto queues
	 * to be reserved equal to number of ports.
	 */
	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
		nb_crypto_qp = rte_eth_dev_count_avail();
	else
		nb_crypto_qp = 0;

	nb_crypto_qp = cryptodevs_init(nb_crypto_qp);

	if (nb_bufs_in_pool == 0) {
		RTE_ETH_FOREACH_DEV(portid) {
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			nb_ports++;
			nb_rxq += get_port_nb_rx_queues(portid);
		}

		nb_txq = nb_lcores;

		nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
						nb_rxq, nb_txq);
	}

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		if (per_port_pool) {
			RTE_ETH_FOREACH_DEV(portid) {
				if ((enabled_port_mask & (1 << portid)) == 0)
					continue;

				pool_init(&socket_ctx[socket_id], socket_id,
					portid, nb_bufs_in_pool);
			}
		} else {
			pool_init(&socket_ctx[socket_id], socket_id, 0,
				nb_bufs_in_pool);
		}

		if (socket_ctx[socket_id].session_pool)
			continue;

		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
			sess_sz);
	}
	printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		sa_check_offloads(portid, &req_rx_offloads[portid],
			&req_tx_offloads[portid]);
		port_init(portid, req_rx_offloads[portid],
			req_tx_offloads[portid]);
	}

	/*
	 * Set the enabled port mask in helper config for use by helper
	 * sub-system. This will be used while initializing devices using
	 * helper sub-system.
	 */
	eh_conf->eth_portmask = enabled_port_mask;

	/* Initialize eventmode components */
	ret = eh_devs_init(eh_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);

	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);

		/* Create flow after starting the device */
		create_default_ipsec_flow(portid, req_rx_offloads[portid]);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on) {
			ret = rte_eth_promiscuous_enable(portid);
			if (ret != 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_promiscuous_enable: err=%s, port=%d\n",
					rte_strerror(-ret), portid);
		}

		rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
			ethdev_reset_event_callback, NULL);

		rte_eth_dev_callback_register(portid,
			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
	}

	/* fragment reassemble is enabled */
	if (frag_tbl_sz != 0) {
		ret = reassemble_init();
		if (ret != 0)
			rte_exit(EXIT_FAILURE, "failed at reassemble init\n");
	}

	/* Replicate each context per socket */
	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
		socket_id = rte_socket_id_by_idx(i);
		if ((socket_ctx[socket_id].session_pool != NULL) &&
			(socket_ctx[socket_id].sa_in == NULL) &&
			(socket_ctx[socket_id].sa_out == NULL)) {
			sa_init(&socket_ctx[socket_id], socket_id);
			sp4_init(&socket_ctx[socket_id], socket_id);
			sp6_init(&socket_ctx[socket_id], socket_id);
			rt_init(&socket_ctx[socket_id], socket_id);
		}
	}

	check_all_ports_link_status(enabled_port_mask);

	if (stats_interval > 0)
		rte_eal_alarm_set(stats_interval * US_PER_S,
				print_stats_cb, NULL);
	else
		RTE_LOG(INFO, IPSEC, "Stats display disabled\n");

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	/* Uninitialize eventmode components */
	ret = eh_devs_uninit(eh_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);

	/* Free eventmode configuration memory */
	eh_conf_uninit(eh_conf);

	/* Destroy inline inbound and outbound sessions */
	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
		socket_id = rte_socket_id_by_idx(i);
		inline_sessions_free(socket_ctx[socket_id].sa_in);
		inline_sessions_free(socket_ctx[socket_id].sa_out);
	}

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		printf("Closing cryptodev %d...", cdev_id);
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
		printf(" Done\n");
	}

	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		printf("Closing port %d...", portid);
		if (flow_info_tbl[portid].rx_def_flow) {
			struct rte_flow_error err;

			ret = rte_flow_destroy(portid,
				flow_info_tbl[portid].rx_def_flow, &err);
			if (ret)
				RTE_LOG(ERR, IPSEC, "Failed to destroy flow"
					" for port %u, err msg: %s\n", portid,
					err.message);
		}
		ret = rte_eth_dev_stop(portid);
		if (ret != 0)
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_stop: err=%s, port=%u\n",
				rte_strerror(-ret), portid);

		rte_eth_dev_close(portid);
		printf(" Done\n");
	}

	/* clean up the EAL */
	rte_eal_cleanup();
	printf("Bye...\n");

	return 0;
}