1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <sys/types.h>
11 #include <netinet/in.h>
12 #include <netinet/ip.h>
13 #include <netinet/ip6.h>
15 #include <sys/queue.h>
21 #include <rte_common.h>
22 #include <rte_bitmap.h>
23 #include <rte_byteorder.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
43 #include <rte_jhash.h>
44 #include <rte_cryptodev.h>
45 #include <rte_security.h>
46 #include <rte_eventdev.h>
48 #include <rte_ip_frag.h>
49 #include <rte_alarm.h>
51 #include "event_helper.h"
54 #include "ipsec_worker.h"
58 volatile bool force_quit;
60 #define MAX_JUMBO_PKT_LEN 9600
62 #define MEMPOOL_CACHE_SIZE 256
64 #define CDEV_QUEUE_DESC 2048
65 #define CDEV_MAP_ENTRIES 16384
66 #define CDEV_MP_CACHE_SZ 64
67 #define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
68 #define MAX_QUEUE_PAIRS 1
70 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
72 /* Configure how many packets ahead to prefetch, when reading packets */
73 #define PREFETCH_OFFSET 3
75 #define MAX_RX_QUEUE_PER_LCORE 16
77 #define MAX_LCORE_PARAMS 1024
80 * Configurable number of RX/TX ring descriptors
82 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
83 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
84 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
85 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
87 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
88 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
89 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
90 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
93 #define FRAG_TBL_BUCKET_ENTRIES 4
94 #define MAX_FRAG_TTL_NS (10LL * NS_PER_S)
96 #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
98 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
99 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
100 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
101 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
102 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
105 struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
107 #define CMD_LINE_OPT_CONFIG "config"
108 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
109 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
110 #define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode"
111 #define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type"
112 #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
113 #define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
114 #define CMD_LINE_OPT_REASSEMBLE "reassemble"
115 #define CMD_LINE_OPT_MTU "mtu"
116 #define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
118 #define CMD_LINE_ARG_EVENT "event"
119 #define CMD_LINE_ARG_POLL "poll"
120 #define CMD_LINE_ARG_ORDERED "ordered"
121 #define CMD_LINE_ARG_ATOMIC "atomic"
122 #define CMD_LINE_ARG_PARALLEL "parallel"
125 /* long options mapped to a short option */
127 /* first long-only option value must be >= 256, so that we won't
128 * conflict with short options
130 CMD_LINE_OPT_MIN_NUM = 256,
131 CMD_LINE_OPT_CONFIG_NUM,
132 CMD_LINE_OPT_SINGLE_SA_NUM,
133 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
134 CMD_LINE_OPT_TRANSFER_MODE_NUM,
135 CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
136 CMD_LINE_OPT_RX_OFFLOAD_NUM,
137 CMD_LINE_OPT_TX_OFFLOAD_NUM,
138 CMD_LINE_OPT_REASSEMBLE_NUM,
139 CMD_LINE_OPT_MTU_NUM,
140 CMD_LINE_OPT_FRAG_TTL_NUM,
143 static const struct option lgopts[] = {
144 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
145 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
146 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
147 {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
148 {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
149 {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
150 {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
151 {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
152 {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
153 {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
157 uint32_t unprotected_port_mask;
158 uint32_t single_sa_idx;
159 /* mask of enabled ports */
160 static uint32_t enabled_port_mask;
161 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
162 static int32_t promiscuous_on = 1;
163 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
164 static uint32_t nb_lcores;
165 static uint32_t single_sa;
166 static uint32_t nb_bufs_in_pool;
169 * RX/TX HW offload capabilities to enable/use on ethernet ports.
170 * By default all capabilities are enabled.
172 static uint64_t dev_rx_offload = UINT64_MAX;
173 static uint64_t dev_tx_offload = UINT64_MAX;
176 * global values that determine multi-seg policy
178 static uint32_t frag_tbl_sz;
179 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
180 static uint32_t mtu_size = RTE_ETHER_MTU;
181 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
183 /* application wide librte_ipsec/SA parameters */
184 struct app_sa_prm app_sa_prm = {
186 .cache_sz = SA_CACHE_SZ,
189 static const char *cfgfile;
191 struct lcore_rx_queue {
194 } __rte_cache_aligned;
196 struct lcore_params {
200 } __rte_cache_aligned;
202 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
204 static struct lcore_params *lcore_params;
205 static uint16_t nb_lcore_params;
207 static struct rte_hash *cdev_map_in;
208 static struct rte_hash *cdev_map_out;
212 struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
216 uint16_t nb_rx_queue;
217 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
218 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
219 struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
220 struct ipsec_ctx inbound;
221 struct ipsec_ctx outbound;
222 struct rt_ctx *rt4_ctx;
223 struct rt_ctx *rt6_ctx;
225 struct rte_ip_frag_tbl *tbl;
226 struct rte_mempool *pool_dir;
227 struct rte_mempool *pool_indir;
228 struct rte_ip_frag_death_row dr;
230 } __rte_cache_aligned;
232 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
234 static struct rte_eth_conf port_conf = {
236 .mq_mode = RTE_ETH_MQ_RX_RSS,
238 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
243 .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
244 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
248 .mq_mode = RTE_ETH_MQ_TX_NONE,
252 struct socket_ctx socket_ctx[NB_SOCKETS];
255 * Determine if multi-segment support is required:
256 * - either frame buffer size is smaller than the MTU
257 * - or reassembly support is requested
260 multi_seg_required(void)
262 return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
263 frame_buf_size || frag_tbl_sz != 0);
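/* Trim any bytes past the IP datagram length (e.g. Ethernet padding) so the
 * mbuf packet length matches the length reported in the IPv4/IPv6 header.
 */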
267 adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
272 plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
273 if (plen < m->pkt_len) {
274 trim = m->pkt_len - plen;
275 rte_pktmbuf_trim(m, trim);
280 adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
285 plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
286 if (plen < m->pkt_len) {
287 trim = m->pkt_len - plen;
288 rte_pktmbuf_trim(m, trim);
292 #if (STATS_INTERVAL > 0)
294 struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
296 /* Print out statistics on packet distribution */
298 print_stats_cb(__rte_unused void *param)
300 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
301 float burst_percent, rx_per_call, tx_per_call;
304 total_packets_dropped = 0;
305 total_packets_tx = 0;
306 total_packets_rx = 0;
308 const char clr[] = { 27, '[', '2', 'J', '\0' };
309 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
311 /* Clear screen and move to top left */
312 printf("%s%s", clr, topLeft);
314 printf("\nCore statistics ====================================");
316 for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
317 /* skip disabled cores */
318 if (rte_lcore_is_enabled(coreid) == 0)
320 burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
321 core_statistics[coreid].rx;
322 rx_per_call = (float)(core_statistics[coreid].rx)/
323 core_statistics[coreid].rx_call;
324 tx_per_call = (float)(core_statistics[coreid].tx)/
325 core_statistics[coreid].tx_call;
326 printf("\nStatistics for core %u ------------------------------"
327 "\nPackets received: %20"PRIu64
328 "\nPackets sent: %24"PRIu64
329 "\nPackets dropped: %21"PRIu64
330 "\nBurst percent: %23.2f"
331 "\nPackets per Rx call: %17.2f"
332 "\nPackets per Tx call: %17.2f",
334 core_statistics[coreid].rx,
335 core_statistics[coreid].tx,
336 core_statistics[coreid].dropped,
341 total_packets_dropped += core_statistics[coreid].dropped;
342 total_packets_tx += core_statistics[coreid].tx;
343 total_packets_rx += core_statistics[coreid].rx;
345 printf("\nAggregate statistics ==============================="
346 "\nTotal packets received: %14"PRIu64
347 "\nTotal packets sent: %18"PRIu64
348 "\nTotal packets dropped: %15"PRIu64,
351 total_packets_dropped);
352 printf("\n====================================================\n");
354 rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
356 #endif /* STATS_INTERVAL */
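/* Classify one received packet: strip the Ethernet header, detect ESP and
 * UDP-encapsulated ESP (NAT-T), and sort the mbuf into the ipsec, ip4 or
 * ip6 group of the ipsec_traffic structure.
 */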
359 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
361 const struct rte_ether_hdr *eth;
362 const struct rte_ipv4_hdr *iph4;
363 const struct rte_ipv6_hdr *iph6;
364 const struct rte_udp_hdr *udp;
365 uint16_t ip4_hdr_len;
368 eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
369 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
371 iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
373 adjust_ipv4_pktlen(pkt, iph4, 0);
375 switch (iph4->next_proto_id) {
377 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
380 if (app_sa_prm.udp_encap == 1) {
381 ip4_hdr_len = ((iph4->version_ihl &
382 RTE_IPV4_HDR_IHL_MASK) *
383 RTE_IPV4_IHL_MULTIPLIER);
384 udp = rte_pktmbuf_mtod_offset(pkt,
385 struct rte_udp_hdr *, ip4_hdr_len);
386 nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
387 if (udp->src_port == nat_port ||
388 udp->dst_port == nat_port){
389 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
391 MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
397 t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
398 t->ip4.pkts[(t->ip4.num)++] = pkt;
401 pkt->l3_len = sizeof(*iph4);
402 pkt->packet_type |= RTE_PTYPE_L3_IPV4;
403 if (pkt->packet_type & RTE_PTYPE_L4_TCP)
404 pkt->l4_len = sizeof(struct rte_tcp_hdr);
405 else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
406 pkt->l4_len = sizeof(struct rte_udp_hdr);
407 } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
409 size_t l3len, ext_len;
412 /* get protocol type */
413 iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
415 adjust_ipv6_pktlen(pkt, iph6, 0);
417 next_proto = iph6->proto;
419 /* determine l3 header size up to ESP extension */
420 l3len = sizeof(struct ip6_hdr);
421 p = rte_pktmbuf_mtod(pkt, uint8_t *);
422 while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
423 (next_proto = rte_ipv6_get_next_ext(p + l3len,
424 next_proto, &ext_len)) >= 0)
427 /* drop packet when IPv6 header exceeds first segment length */
428 if (unlikely(l3len > pkt->data_len)) {
433 switch (next_proto) {
435 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
438 if (app_sa_prm.udp_encap == 1) {
439 udp = rte_pktmbuf_mtod_offset(pkt,
440 struct rte_udp_hdr *, l3len);
441 nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
442 if (udp->src_port == nat_port ||
443 udp->dst_port == nat_port){
444 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
446 MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
452 t->ip6.data[t->ip6.num] = &iph6->proto;
453 t->ip6.pkts[(t->ip6.num)++] = pkt;
457 pkt->packet_type |= RTE_PTYPE_L3_IPV6;
459 /* Unknown/Unsupported type, drop the packet */
460 RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
461 rte_be_to_cpu_16(eth->ether_type));
466 /* Check if the packet has been processed inline. For inline protocol
467 * processed packets, the metadata in the mbuf can be used to identify
468 * the security processing done on the packet. The metadata will be
469 * used to retrieve the application registered userdata associated
470 * with the security session.
473 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
474 rte_security_dynfield_is_registered()) {
476 struct ipsec_mbuf_metadata *priv;
477 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
478 rte_eth_dev_get_sec_ctx(
481 /* Retrieve the userdata registered. Here, the userdata
482 * registered is the SA pointer.
484 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
485 *rte_security_dynfield(pkt));
487 /* userdata could not be retrieved */
491 /* Save SA as priv member in mbuf. This will be used in the
492 * IPsec selector(SP-SA) check.
495 priv = get_priv(pkt);
501 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
510 for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
511 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
513 prepare_one_packet(pkts[i], t);
515 /* Process remaining packets */
516 for (; i < nb_pkts; i++)
517 prepare_one_packet(pkts[i], t);
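/* Prepend the Ethernet header, set L2/L3 lengths and checksum offload flags,
 * and fill in the per-port source/destination MAC addresses before TX.
 */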
521 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
522 const struct lcore_conf *qconf)
525 struct rte_ether_hdr *ethhdr;
527 ip = rte_pktmbuf_mtod(pkt, struct ip *);
529 ethhdr = (struct rte_ether_hdr *)
530 rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
532 if (ip->ip_v == IPVERSION) {
533 pkt->ol_flags |= qconf->outbound.ipv4_offloads;
534 pkt->l3_len = sizeof(struct ip);
535 pkt->l2_len = RTE_ETHER_HDR_LEN;
539 /* calculate IPv4 cksum in SW */
540 if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
541 ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
543 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
545 pkt->ol_flags |= qconf->outbound.ipv6_offloads;
546 pkt->l3_len = sizeof(struct ip6_hdr);
547 pkt->l2_len = RTE_ETHER_HDR_LEN;
549 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
552 memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
553 sizeof(struct rte_ether_addr));
554 memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
555 sizeof(struct rte_ether_addr));
559 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
560 const struct lcore_conf *qconf)
563 const int32_t prefetch_offset = 2;
565 for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
566 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
567 prepare_tx_pkt(pkts[i], port, qconf);
569 /* Process remaining packets */
570 for (; i < nb_pkts; i++)
571 prepare_tx_pkt(pkts[i], port, qconf);
574 /* Send burst of packets on an output interface */
575 static inline int32_t
576 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
578 struct rte_mbuf **m_table;
582 queueid = qconf->tx_queue_id[port];
583 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
585 prepare_tx_burst(m_table, n, port, qconf);
587 ret = rte_eth_tx_burst(port, queueid, m_table, n);
589 core_stats_update_tx(ret);
591 if (unlikely(ret < n)) {
593 free_pkts(&m_table[ret], 1);
601 * Helper function to fragment one packet and queue the fragments for TX.
603 static inline uint32_t
604 send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
605 uint16_t port, uint8_t proto)
611 tbl = qconf->tx_mbufs + port;
614 /* free space for new fragments */
615 if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
616 send_burst(qconf, len, port);
620 n = RTE_DIM(tbl->m_table) - len;
622 if (proto == IPPROTO_IP)
623 rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
624 n, mtu_size, qconf->frag.pool_dir,
625 qconf->frag.pool_indir);
627 rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
628 n, mtu_size, qconf->frag.pool_dir,
629 qconf->frag.pool_indir);
635 "%s: failed to fragment packet with size %u, "
637 __func__, m->pkt_len, rte_errno);
643 /* Enqueue a single packet, and send burst if queue is filled */
644 static inline int32_t
645 send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
649 struct lcore_conf *qconf;
651 lcore_id = rte_lcore_id();
653 qconf = &lcore_conf[lcore_id];
654 len = qconf->tx_mbufs[port].len;
656 if (m->pkt_len <= mtu_size) {
657 qconf->tx_mbufs[port].m_table[len] = m;
660 /* need to fragment the packet */
661 } else if (frag_tbl_sz > 0)
662 len = send_fragment_packet(qconf, m, port, proto);
666 /* enough pkts to be sent */
667 if (unlikely(len == MAX_PKT_BURST)) {
668 send_burst(qconf, MAX_PKT_BURST, port);
672 qconf->tx_mbufs[port].len = len;
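/* Inbound security policy check: classify packets against the SP ACL and
 * verify that decrypted packets match the SA referenced by the matching
 * policy.
 */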
677 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
681 uint32_t i, j, res, sa_idx;
683 if (ip->num == 0 || sp == NULL)
686 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
687 ip->num, DEFAULT_MAX_CATEGORIES);
690 for (i = 0; i < ip->num; i++) {
697 if (res == DISCARD) {
702 /* Only check SPI match for processed IPSec packets */
703 if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
709 if (!inbound_sa_check(sa, m, sa_idx)) {
719 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
728 for (i = 0; i < num; i++) {
731 ip = rte_pktmbuf_mtod(m, struct ip *);
733 if (ip->ip_v == IPVERSION) {
734 trf->ip4.pkts[n4] = m;
735 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
736 uint8_t *, offsetof(struct ip, ip_p));
738 } else if (ip->ip_v == IP6_VERSION) {
739 trf->ip6.pkts[n6] = m;
740 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
742 offsetof(struct ip6_hdr, ip6_nxt));
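/* Inbound path: decrypt/verify IPsec packets, split the results into IPv4
 * and IPv6 traffic and run the inbound SP checks on both.
 */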
754 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
755 struct ipsec_traffic *traffic)
757 uint16_t nb_pkts_in, n_ip4, n_ip6;
759 n_ip4 = traffic->ip4.num;
760 n_ip6 = traffic->ip6.num;
762 if (app_sa_prm.enable == 0) {
763 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
764 traffic->ipsec.num, MAX_PKT_BURST);
765 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
767 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
768 traffic->ipsec.saptr, traffic->ipsec.num);
769 ipsec_process(ipsec_ctx, traffic);
772 inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
775 inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
780 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
781 struct traffic_type *ipsec)
784 uint32_t i, j, sa_idx;
786 if (ip->num == 0 || sp == NULL)
789 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
790 ip->num, DEFAULT_MAX_CATEGORIES);
793 for (i = 0; i < ip->num; i++) {
795 sa_idx = ip->res[i] - 1;
796 if (ip->res[i] == DISCARD)
798 else if (ip->res[i] == BYPASS)
801 ipsec->res[ipsec->num] = sa_idx;
802 ipsec->pkts[ipsec->num++] = m;
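/* Outbound path: drop IPsec packets arriving on protected ports, classify
 * plain traffic against the outbound SP and encrypt packets that match an SA.
 */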
809 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
810 struct ipsec_traffic *traffic)
813 uint16_t idx, nb_pkts_out, i;
815 /* Drop any IPsec traffic from protected ports */
816 free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
818 traffic->ipsec.num = 0;
820 outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
822 outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
824 if (app_sa_prm.enable == 0) {
826 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
827 traffic->ipsec.res, traffic->ipsec.num,
830 for (i = 0; i < nb_pkts_out; i++) {
831 m = traffic->ipsec.pkts[i];
832 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
833 if (ip->ip_v == IPVERSION) {
834 idx = traffic->ip4.num++;
835 traffic->ip4.pkts[idx] = m;
837 idx = traffic->ip6.num++;
838 traffic->ip6.pkts[idx] = m;
842 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
843 traffic->ipsec.saptr, traffic->ipsec.num);
844 ipsec_process(ipsec_ctx, traffic);
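/* Inbound path for single-SA mode: no SP lookup, all IPsec packets are
 * processed and then split into IPv4/IPv6 traffic for routing.
 */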
849 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
850 struct ipsec_traffic *traffic)
853 uint32_t nb_pkts_in, i, idx;
855 if (app_sa_prm.enable == 0) {
857 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
858 traffic->ipsec.num, MAX_PKT_BURST);
860 for (i = 0; i < nb_pkts_in; i++) {
861 m = traffic->ipsec.pkts[i];
862 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
863 if (ip->ip_v == IPVERSION) {
864 idx = traffic->ip4.num++;
865 traffic->ip4.pkts[idx] = m;
867 idx = traffic->ip6.num++;
868 traffic->ip6.pkts[idx] = m;
872 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
873 traffic->ipsec.saptr, traffic->ipsec.num);
874 ipsec_process(ipsec_ctx, traffic);
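/* Outbound path for single-SA mode: all plain traffic is sent through the
 * single configured SA, bypassing the SP lookup.
 */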
879 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
880 struct ipsec_traffic *traffic)
883 uint32_t nb_pkts_out, i, n;
886 /* Drop any IPsec traffic from protected ports */
887 free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
891 for (i = 0; i < traffic->ip4.num; i++) {
892 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
893 traffic->ipsec.res[n++] = single_sa_idx;
896 for (i = 0; i < traffic->ip6.num; i++) {
897 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
898 traffic->ipsec.res[n++] = single_sa_idx;
901 traffic->ip4.num = 0;
902 traffic->ip6.num = 0;
903 traffic->ipsec.num = n;
905 if (app_sa_prm.enable == 0) {
907 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
908 traffic->ipsec.res, traffic->ipsec.num,
911 /* They all use the same SA (ip4 or ip6 tunnel) */
912 m = traffic->ipsec.pkts[0];
913 ip = rte_pktmbuf_mtod(m, struct ip *);
914 if (ip->ip_v == IPVERSION) {
915 traffic->ip4.num = nb_pkts_out;
916 for (i = 0; i < nb_pkts_out; i++)
917 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
919 traffic->ip6.num = nb_pkts_out;
920 for (i = 0; i < nb_pkts_out; i++)
921 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
924 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
925 traffic->ipsec.saptr, traffic->ipsec.num);
926 ipsec_process(ipsec_ctx, traffic);
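/* For inline-offloaded packets the output port is stored in the SA saved in
 * the mbuf private data, so no LPM lookup is needed.
 */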
930 static inline int32_t
931 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
933 struct ipsec_mbuf_metadata *priv;
936 priv = get_priv(pkt);
939 if (unlikely(sa == NULL)) {
940 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
948 return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
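/* Route IPv4 packets: LPM lookup on the destination address for non-inline
 * packets, output port taken from the SA for inline-offloaded ones.
 */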
959 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
961 uint32_t hop[MAX_PKT_BURST * 2];
962 uint32_t dst_ip[MAX_PKT_BURST * 2];
965 uint16_t lpm_pkts = 0;
970 /* Need to do an LPM lookup for non-inline packets. Inline packets will
971 * have port ID in the SA
974 for (i = 0; i < nb_pkts; i++) {
975 if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
976 /* Security offload not enabled. So an LPM lookup is
977 * required to get the hop
979 offset = offsetof(struct ip, ip_dst);
980 dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
982 dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
987 rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
991 for (i = 0; i < nb_pkts; i++) {
992 if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
993 /* Read hop from the SA */
994 pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
996 /* Need to use hop returned by lookup */
997 pkt_hop = hop[lpm_pkts++];
1000 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
1001 free_pkts(&pkts[i], 1);
1004 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
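/* Route IPv6 packets, same logic as route4_pkts but using the LPM6 table. */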
1009 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
1011 int32_t hop[MAX_PKT_BURST * 2];
1012 uint8_t dst_ip[MAX_PKT_BURST * 2][16];
1014 int32_t pkt_hop = 0;
1016 uint16_t lpm_pkts = 0;
1021 /* Need to do an LPM lookup for non-inline packets. Inline packets will
1022 * have port ID in the SA
1025 for (i = 0; i < nb_pkts; i++) {
1026 if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
1027 /* Security offload not enabled. So an LPM lookup is
1028 * required to get the hop
1030 offset = offsetof(struct ip6_hdr, ip6_dst);
1031 ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
1033 memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
1038 rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
1043 for (i = 0; i < nb_pkts; i++) {
1044 if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
1045 /* Read hop from the SA */
1046 pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
1048 /* Need to use hop returned by lookup */
1049 pkt_hop = hop[lpm_pkts++];
1052 if (pkt_hop == -1) {
1053 free_pkts(&pkts[i], 1);
1056 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
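/* Per-burst entry point for poll mode: prepare traffic, run inbound or
 * outbound processing depending on the port, then route the results.
 */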
1061 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
1062 uint8_t nb_pkts, uint16_t portid)
1064 struct ipsec_traffic traffic;
1066 prepare_traffic(pkts, &traffic, nb_pkts);
1068 if (unlikely(single_sa)) {
1069 if (is_unprotected_port(portid))
1070 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
1072 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
1074 if (is_unprotected_port(portid))
1075 process_pkts_inbound(&qconf->inbound, &traffic);
1077 process_pkts_outbound(&qconf->outbound, &traffic);
1080 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
1081 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
1085 drain_tx_buffers(struct lcore_conf *qconf)
1090 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1091 buf = &qconf->tx_mbufs[portid];
1094 send_burst(qconf, buf->len, portid);
1100 drain_crypto_buffers(struct lcore_conf *qconf)
1103 struct ipsec_ctx *ctx;
1105 /* drain inbound buffers */
1106 ctx = &qconf->inbound;
1107 for (i = 0; i != ctx->nb_qps; i++) {
1108 if (ctx->tbl[i].len != 0)
1109 enqueue_cop_burst(ctx->tbl + i);
1112 /* drain outbound buffers */
1113 ctx = &qconf->outbound;
1114 for (i = 0; i != ctx->nb_qps; i++) {
1115 if (ctx->tbl[i].len != 0)
1116 enqueue_cop_burst(ctx->tbl + i);
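/* Dequeue completed inbound crypto operations, apply SP checks and route the
 * resulting packets.
 */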
1121 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
1122 struct ipsec_ctx *ctx)
1125 struct ipsec_traffic trf;
1127 if (app_sa_prm.enable == 0) {
1129 /* dequeue packets from crypto-queue */
1130 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1131 RTE_DIM(trf.ipsec.pkts));
1136 /* split traffic by ipv4-ipv6 */
1137 split46_traffic(&trf, trf.ipsec.pkts, n);
1139 ipsec_cqp_process(ctx, &trf);
1141 /* process ipv4 packets */
1142 if (trf.ip4.num != 0) {
1143 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
1144 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1147 /* process ipv6 packets */
1148 if (trf.ip6.num != 0) {
1149 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
1150 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
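/* Dequeue completed outbound crypto operations and route the resulting
 * packets.
 */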
1155 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
1156 struct ipsec_ctx *ctx)
1159 struct ipsec_traffic trf;
1161 if (app_sa_prm.enable == 0) {
1163 /* dequeue packets from crypto-queue */
1164 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1165 RTE_DIM(trf.ipsec.pkts));
1170 /* split traffic by ipv4-ipv6 */
1171 split46_traffic(&trf, trf.ipsec.pkts, n);
1173 ipsec_cqp_process(ctx, &trf);
1175 /* process ipv4 packets */
1176 if (trf.ip4.num != 0)
1177 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1179 /* process ipv6 packets */
1180 if (trf.ip6.num != 0)
1181 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1184 /* main processing loop */
1186 ipsec_poll_mode_worker(void)
1188 struct rte_mbuf *pkts[MAX_PKT_BURST];
1190 uint64_t prev_tsc, diff_tsc, cur_tsc;
1194 struct lcore_conf *qconf;
1195 int32_t rc, socket_id;
1196 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1197 / US_PER_S * BURST_TX_DRAIN_US;
1198 struct lcore_rx_queue *rxql;
1201 lcore_id = rte_lcore_id();
1202 qconf = &lcore_conf[lcore_id];
1203 rxql = qconf->rx_queue_list;
1204 socket_id = rte_lcore_to_socket_id(lcore_id);
1206 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
1207 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
1208 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
1209 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
1210 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
1211 qconf->inbound.cdev_map = cdev_map_in;
1212 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
1213 qconf->inbound.session_priv_pool =
1214 socket_ctx[socket_id].session_priv_pool;
1215 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
1216 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
1217 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
1218 qconf->outbound.cdev_map = cdev_map_out;
1219 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
1220 qconf->outbound.session_priv_pool =
1221 socket_ctx[socket_id].session_priv_pool;
1222 qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
1223 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1225 rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
1228 "SAD cache init on lcore %u, failed with code: %d\n",
1233 if (qconf->nb_rx_queue == 0) {
1234 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1239 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1241 for (i = 0; i < qconf->nb_rx_queue; i++) {
1242 portid = rxql[i].port_id;
1243 queueid = rxql[i].queue_id;
1244 RTE_LOG(INFO, IPSEC,
1245 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1246 lcore_id, portid, queueid);
1249 while (!force_quit) {
1250 cur_tsc = rte_rdtsc();
1252 /* TX queue buffer drain */
1253 diff_tsc = cur_tsc - prev_tsc;
1255 if (unlikely(diff_tsc > drain_tsc)) {
1256 drain_tx_buffers(qconf);
1257 drain_crypto_buffers(qconf);
1261 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1263 /* Read packets from RX queues */
1264 portid = rxql[i].port_id;
1265 queueid = rxql[i].queue_id;
1266 nb_rx = rte_eth_rx_burst(portid, queueid,
1267 pkts, MAX_PKT_BURST);
1270 core_stats_update_rx(nb_rx);
1271 process_pkts(qconf, pkts, nb_rx, portid);
1274 /* dequeue and process completed crypto-ops */
1275 if (is_unprotected_port(portid))
1276 drain_inbound_crypto_queues(qconf,
1279 drain_outbound_crypto_queues(qconf,
1286 check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
1292 for (i = 0; i < nb_lcore_params; ++i) {
1293 portid = lcore_params_array[i].port_id;
1294 if (portid == fdir_portid) {
1295 queueid = lcore_params_array[i].queue_id;
1296 if (queueid == fdir_qid)
1300 if (i == nb_lcore_params - 1)
1308 check_poll_mode_params(struct eh_conf *eh_conf)
1318 if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
1321 if (lcore_params == NULL) {
1322 printf("Error: No port/queue/core mappings\n");
1326 for (i = 0; i < nb_lcore_params; ++i) {
1327 lcore = lcore_params[i].lcore_id;
1328 if (!rte_lcore_is_enabled(lcore)) {
1329 printf("error: lcore %hhu is not enabled in "
1330 "lcore mask\n", lcore);
1333 socket_id = rte_lcore_to_socket_id(lcore);
1334 if (socket_id != 0 && numa_on == 0) {
1335 printf("warning: lcore %hhu is on socket %d "
1339 portid = lcore_params[i].port_id;
1340 if ((enabled_port_mask & (1 << portid)) == 0) {
1341 printf("port %u is not enabled in port mask\n", portid);
1344 if (!rte_eth_dev_is_valid_port(portid)) {
1345 printf("port %u is not present on the board\n", portid);
1353 get_port_nb_rx_queues(const uint16_t port)
1358 for (i = 0; i < nb_lcore_params; ++i) {
1359 if (lcore_params[i].port_id == port &&
1360 lcore_params[i].queue_id > queue)
1361 queue = lcore_params[i].queue_id;
1363 return (uint8_t)(++queue);
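/* Distribute the configured (port, queue) pairs to their lcores' RX queue
 * lists.
 */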
1367 init_lcore_rx_queues(void)
1369 uint16_t i, nb_rx_queue;
1372 for (i = 0; i < nb_lcore_params; ++i) {
1373 lcore = lcore_params[i].lcore_id;
1374 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1375 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1376 printf("error: too many queues (%u) for lcore: %u\n",
1377 nb_rx_queue + 1, lcore);
1380 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1381 lcore_params[i].port_id;
1382 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1383 lcore_params[i].queue_id;
1384 lcore_conf[lcore].nb_rx_queue++;
1391 print_usage(const char *prgname)
1393 fprintf(stderr, "%s [EAL options] --"
1399 " [-w REPLAY_WINDOW_SIZE]"
1403 " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
1405 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1406 " [--single-sa SAIDX]"
1407 " [--cryptodev_mask MASK]"
1408 " [--transfer-mode MODE]"
1409 " [--event-schedule-type TYPE]"
1410 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1411 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1412 " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
1413 " [--" CMD_LINE_OPT_MTU " MTU]"
1415 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1416 " -P : Enable promiscuous mode\n"
1417 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1418 " -j FRAMESIZE: Data buffer size, minimum (and default)\n"
1419 " value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
1420 " -l enables code-path that uses librte_ipsec\n"
1421 " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
1422 " size for each SA\n"
1424 " -a enables SA SQN atomic behaviour\n"
1425 " -c specifies inbound SAD cache size,\n"
1426 " zero value disables the cache (default value: 128)\n"
1427 " -s number of mbufs in packet pool, if not specified number\n"
1428 " of mbufs will be calculated based on number of cores,\n"
1429 " ports and crypto queues\n"
1430 " -f CONFIG_FILE: Configuration file\n"
1431 " --config (port,queue,lcore): Rx queue configuration. In poll\n"
1432 " mode determines which queues from\n"
1433 " which ports are mapped to which cores.\n"
1434 " In event mode this option is not used\n"
1435 " as packets are dynamically scheduled\n"
1436 " to cores by HW.\n"
1437 " --single-sa SAIDX: In poll mode use single SA index for\n"
1438 " outbound traffic, bypassing the SP\n"
1439 " In event mode selects driver submode,\n"
1440 " SA index value is ignored\n"
1441 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1442 " devices to configure\n"
1443 " --transfer-mode MODE\n"
1444 " \"poll\" : Packet transfer via polling (default)\n"
1445 " \"event\" : Packet transfer via event device\n"
1446 " --event-schedule-type TYPE queue schedule type, used only when\n"
1447 " transfer mode is set to event\n"
1448 " \"ordered\" : Ordered (default)\n"
1449 " \"atomic\" : Atomic\n"
1450 " \"parallel\" : Parallel\n"
1451 " --" CMD_LINE_OPT_RX_OFFLOAD
1452 ": bitmask of the RX HW offload capabilities to enable/use\n"
1453 " (RTE_ETH_RX_OFFLOAD_*)\n"
1454 " --" CMD_LINE_OPT_TX_OFFLOAD
1455 ": bitmask of the TX HW offload capabilities to enable/use\n"
1456 " (RTE_ETH_TX_OFFLOAD_*)\n"
1457 " --" CMD_LINE_OPT_REASSEMBLE " NUM"
1458 ": max number of entries in reassemble(fragment) table\n"
1459 " (zero (default value) disables reassembly)\n"
1460 " --" CMD_LINE_OPT_MTU " MTU"
1461 ": MTU value on all ports (default value: 1500)\n"
1462 " outgoing packets with bigger size will be fragmented\n"
1463 " incoming packets with bigger size will be discarded\n"
1464 " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
1465 ": fragments lifetime in nanoseconds, default\n"
1466 " and maximum value is 10.000.000.000 ns (10 s)\n"
1472 parse_mask(const char *str, uint64_t *val)
1478 t = strtoul(str, &end, 0);
1479 if (errno != 0 || end[0] != 0)
1487 parse_portmask(const char *portmask)
1494 /* parse hexadecimal string */
1495 pm = strtoul(portmask, &end, 16);
1496 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1499 if ((pm == 0) && errno)
1506 parse_decimal(const char *str)
1511 num = strtoull(str, &end, 10);
1512 if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
1520 parse_config(const char *q_arg)
1523 const char *p, *p0 = q_arg;
1531 unsigned long int_fld[_NUM_FLD];
1532 char *str_fld[_NUM_FLD];
1536 nb_lcore_params = 0;
1538 while ((p = strchr(p0, '(')) != NULL) {
1540 p0 = strchr(p, ')');
1545 if (size >= sizeof(s))
1548 snprintf(s, sizeof(s), "%.*s", size, p);
1549 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1552 for (i = 0; i < _NUM_FLD; i++) {
1554 int_fld[i] = strtoul(str_fld[i], &end, 0);
1555 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1558 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1559 printf("exceeded max number of lcore params: %hu\n",
1563 lcore_params_array[nb_lcore_params].port_id =
1564 (uint8_t)int_fld[FLD_PORT];
1565 lcore_params_array[nb_lcore_params].queue_id =
1566 (uint8_t)int_fld[FLD_QUEUE];
1567 lcore_params_array[nb_lcore_params].lcore_id =
1568 (uint8_t)int_fld[FLD_LCORE];
1571 lcore_params = lcore_params_array;
1576 print_app_sa_prm(const struct app_sa_prm *prm)
1578 printf("librte_ipsec usage: %s\n",
1579 (prm->enable == 0) ? "disabled" : "enabled");
1581 printf("replay window size: %u\n", prm->window_size);
1582 printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1583 printf("SA flags: %#" PRIx64 "\n", prm->flags);
1584 printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
1588 parse_transfer_mode(struct eh_conf *conf, const char *optarg)
1590 if (!strcmp(CMD_LINE_ARG_POLL, optarg))
1591 conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1592 else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
1593 conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
1595 printf("Unsupported packet transfer mode\n");
1603 parse_schedule_type(struct eh_conf *conf, const char *optarg)
1605 struct eventmode_conf *em_conf = NULL;
1607 /* Get eventmode conf */
1608 em_conf = conf->mode_params;
1610 if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
1611 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
1612 else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
1613 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
1614 else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
1615 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
1617 printf("Unsupported queue schedule type\n");
1625 parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
1630 int32_t option_index;
1631 char *prgname = argv[0];
1632 int32_t f_present = 0;
1636 while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:",
1637 lgopts, &option_index)) != EOF) {
1641 enabled_port_mask = parse_portmask(optarg);
1642 if (enabled_port_mask == 0) {
1643 printf("invalid portmask\n");
1644 print_usage(prgname);
1649 printf("Promiscuous mode selected\n");
1653 unprotected_port_mask = parse_portmask(optarg);
1654 if (unprotected_port_mask == 0) {
1655 printf("invalid unprotected portmask\n");
1656 print_usage(prgname);
1661 if (f_present == 1) {
1662 printf("\"-f\" option present more than "
1664 print_usage(prgname);
1672 ret = parse_decimal(optarg);
1674 printf("Invalid number of buffers in a pool: "
1676 print_usage(prgname);
1680 nb_bufs_in_pool = ret;
1684 ret = parse_decimal(optarg);
1685 if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1687 printf("Invalid frame buffer size value: %s\n",
1689 print_usage(prgname);
1692 frame_buf_size = ret;
1693 printf("Custom frame buffer size %u\n", frame_buf_size);
1696 app_sa_prm.enable = 1;
1699 app_sa_prm.window_size = parse_decimal(optarg);
1702 app_sa_prm.enable_esn = 1;
1705 app_sa_prm.enable = 1;
1706 app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1709 ret = parse_decimal(optarg);
1711 printf("Invalid SA cache size: %s\n", optarg);
1712 print_usage(prgname);
1715 app_sa_prm.cache_sz = ret;
1717 case CMD_LINE_OPT_CONFIG_NUM:
1718 ret = parse_config(optarg);
1720 printf("Invalid config\n");
1721 print_usage(prgname);
1725 case CMD_LINE_OPT_SINGLE_SA_NUM:
1726 ret = parse_decimal(optarg);
1727 if (ret == -1 || ret > UINT32_MAX) {
1728 printf("Invalid argument[sa_idx]\n");
1729 print_usage(prgname);
1735 single_sa_idx = ret;
1736 eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1737 printf("Configured with single SA index %u\n",
1740 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1741 ret = parse_portmask(optarg);
1743 printf("Invalid argument[portmask]\n");
1744 print_usage(prgname);
1749 enabled_cryptodev_mask = ret;
1752 case CMD_LINE_OPT_TRANSFER_MODE_NUM:
1753 ret = parse_transfer_mode(eh_conf, optarg);
1755 printf("Invalid packet transfer mode\n");
1756 print_usage(prgname);
1761 case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
1762 ret = parse_schedule_type(eh_conf, optarg);
1764 printf("Invalid queue schedule type\n");
1765 print_usage(prgname);
1770 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1771 ret = parse_mask(optarg, &dev_rx_offload);
1773 printf("Invalid argument for \'%s\': %s\n",
1774 CMD_LINE_OPT_RX_OFFLOAD, optarg);
1775 print_usage(prgname);
1779 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1780 ret = parse_mask(optarg, &dev_tx_offload);
1782 printf("Invalid argument for \'%s\': %s\n",
1783 CMD_LINE_OPT_TX_OFFLOAD, optarg);
1784 print_usage(prgname);
1788 case CMD_LINE_OPT_REASSEMBLE_NUM:
1789 ret = parse_decimal(optarg);
1790 if (ret < 0 || ret > UINT32_MAX) {
1791 printf("Invalid argument for \'%s\': %s\n",
1792 CMD_LINE_OPT_REASSEMBLE, optarg);
1793 print_usage(prgname);
1798 case CMD_LINE_OPT_MTU_NUM:
1799 ret = parse_decimal(optarg);
1800 if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1801 printf("Invalid argument for \'%s\': %s\n",
1802 CMD_LINE_OPT_MTU, optarg);
1803 print_usage(prgname);
1808 case CMD_LINE_OPT_FRAG_TTL_NUM:
1809 ret = parse_decimal(optarg);
1810 if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1811 printf("Invalid argument for \'%s\': %s\n",
1812 CMD_LINE_OPT_FRAG_TTL, optarg);
1813 print_usage(prgname);
1819 print_usage(prgname);
1824 if (f_present == 0) {
1825 printf("Mandatory option \"-f\" not present\n");
1829 /* check whether we need to enable multi-seg support */
1830 if (multi_seg_required()) {
1831 /* legacy mode doesn't support multi-seg */
1832 app_sa_prm.enable = 1;
1833 printf("frame buf size: %u, mtu: %u, "
1834 "number of reassemble entries: %u\n"
1835 "multi-segment support is required\n",
1836 frame_buf_size, mtu_size, frag_tbl_sz);
1839 print_app_sa_prm(&app_sa_prm);
1842 argv[optind-1] = prgname;
1845 optind = 1; /* reset getopt lib */
1850 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1852 char buf[RTE_ETHER_ADDR_FMT_SIZE];
1853 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1854 printf("%s%s", name, buf);
1858 * Update destination ethaddr for the port.
1861 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1863 if (port >= RTE_DIM(ethaddr_tbl))
1866 ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1870 /* Check the link status of all ports for up to 9 s, and print the final status */
1872 check_all_ports_link_status(uint32_t port_mask)
1874 #define CHECK_INTERVAL 100 /* 100ms */
1875 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1877 uint8_t count, all_ports_up, print_flag = 0;
1878 struct rte_eth_link link;
1880 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1882 printf("\nChecking link status");
1884 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1886 RTE_ETH_FOREACH_DEV(portid) {
1887 if ((port_mask & (1 << portid)) == 0)
1889 memset(&link, 0, sizeof(link));
1890 ret = rte_eth_link_get_nowait(portid, &link);
1893 if (print_flag == 1)
1894 printf("Port %u link get failed: %s\n",
1895 portid, rte_strerror(-ret));
1898 /* print link status if flag set */
1899 if (print_flag == 1) {
1900 rte_eth_link_to_str(link_status_text,
1901 sizeof(link_status_text), &link);
1902 printf("Port %d %s\n", portid,
1906 /* clear all_ports_up flag if any link down */
1907 if (link.link_status == RTE_ETH_LINK_DOWN) {
1912 /* after finally printing all link status, get out */
1913 if (print_flag == 1)
1916 if (all_ports_up == 0) {
1919 rte_delay_ms(CHECK_INTERVAL);
1922 /* set the print_flag if all ports up or timeout */
1923 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1931 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1932 uint16_t qp, struct lcore_params *params,
1933 struct ipsec_ctx *ipsec_ctx,
1934 const struct rte_cryptodev_capabilities *cipher,
1935 const struct rte_cryptodev_capabilities *auth,
1936 const struct rte_cryptodev_capabilities *aead)
1940 struct cdev_key key = { 0 };
1942 key.lcore_id = params->lcore_id;
1944 key.cipher_algo = cipher->sym.cipher.algo;
1946 key.auth_algo = auth->sym.auth.algo;
1948 key.aead_algo = aead->sym.aead.algo;
1950 ret = rte_hash_lookup(map, &key);
1954 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1955 if (ipsec_ctx->tbl[i].id == cdev_id)
1958 if (i == ipsec_ctx->nb_qps) {
1959 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1960 printf("Maximum number of crypto devices assigned to "
1961 "a core, increase MAX_QP_PER_LCORE value\n");
1964 ipsec_ctx->tbl[i].id = cdev_id;
1965 ipsec_ctx->tbl[i].qp = qp;
1966 ipsec_ctx->nb_qps++;
1967 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1968 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1972 ret = rte_hash_add_key_data(map, &key, (void *)i);
1974 printf("Faled to insert cdev mapping for (lcore %u, "
1975 "cdev %u, qp %u), errno %d\n",
1976 key.lcore_id, ipsec_ctx->tbl[i].id,
1977 ipsec_ctx->tbl[i].qp, ret);
1985 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1986 uint16_t qp, struct lcore_params *params)
1989 const struct rte_cryptodev_capabilities *i, *j;
1990 struct rte_hash *map;
1991 struct lcore_conf *qconf;
1992 struct ipsec_ctx *ipsec_ctx;
1995 qconf = &lcore_conf[params->lcore_id];
1997 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1999 ipsec_ctx = &qconf->outbound;
2003 ipsec_ctx = &qconf->inbound;
2007 /* Require cryptodevs with operation chaining */
2008 if (!(dev_info->feature_flags &
2009 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
2012 for (i = dev_info->capabilities;
2013 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
2014 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2017 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2018 ret |= add_mapping(map, str, cdev_id, qp, params,
2019 ipsec_ctx, NULL, NULL, i);
2023 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
2026 for (j = dev_info->capabilities;
2027 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
2028 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2031 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
2034 ret |= add_mapping(map, str, cdev_id, qp, params,
2035 ipsec_ctx, i, j, NULL);
2042 /* Check if the device is enabled by cryptodev_mask */
2044 check_cryptodev_mask(uint8_t cdev_id)
2046 if (enabled_cryptodev_mask & (1 << cdev_id))
2053 cryptodevs_init(uint16_t req_queue_num)
2055 struct rte_cryptodev_config dev_conf;
2056 struct rte_cryptodev_qp_conf qp_conf;
2057 uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
2059 struct rte_hash_parameters params = { 0 };
2061 const uint64_t mseg_flag = multi_seg_required() ?
2062 RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
2064 params.entries = CDEV_MAP_ENTRIES;
2065 params.key_len = sizeof(struct cdev_key);
2066 params.hash_func = rte_jhash;
2067 params.hash_func_init_val = 0;
2068 params.socket_id = rte_socket_id();
2070 params.name = "cdev_map_in";
2071 cdev_map_in = rte_hash_create(&params);
2072 if (cdev_map_in == NULL)
2073 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
2076 params.name = "cdev_map_out";
2077 cdev_map_out = rte_hash_create(&params);
2078 if (cdev_map_out == NULL)
2079 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
2082 printf("lcore/cryptodev/qp mappings:\n");
2086 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
2087 struct rte_cryptodev_info cdev_info;
2089 if (check_cryptodev_mask((uint8_t)cdev_id))
2092 rte_cryptodev_info_get(cdev_id, &cdev_info);
2094 if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
2095 rte_exit(EXIT_FAILURE,
2096 "Device %hd does not support \'%s\' feature\n",
2098 rte_cryptodev_get_feature_name(mseg_flag));
2100 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
2101 max_nb_qps = cdev_info.max_nb_queue_pairs;
2103 max_nb_qps = nb_lcore_params;
2107 while (qp < max_nb_qps && i < nb_lcore_params) {
2108 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
2109 &lcore_params[idx]))
2112 idx = idx % nb_lcore_params;
2116 qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
2121 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
2122 dev_conf.nb_queue_pairs = qp;
2123 dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
2125 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
2126 if (dev_max_sess != 0 &&
2127 dev_max_sess < get_nb_crypto_sessions())
2128 rte_exit(EXIT_FAILURE,
2129 "Device does not support at least %u "
2130 "sessions", get_nb_crypto_sessions());
2132 if (rte_cryptodev_configure(cdev_id, &dev_conf))
2133 rte_panic("Failed to initialize cryptodev %u\n",
2136 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
2137 qp_conf.mp_session =
2138 socket_ctx[dev_conf.socket_id].session_pool;
2139 qp_conf.mp_session_private =
2140 socket_ctx[dev_conf.socket_id].session_priv_pool;
2141 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
2142 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
2143 &qp_conf, dev_conf.socket_id))
2144 rte_panic("Failed to setup queue %u for "
2145 "cdev_id %u\n", 0, cdev_id);
2147 if (rte_cryptodev_start(cdev_id))
2148 rte_panic("Failed to start cryptodev %u\n",
2154 return total_nb_qps;
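/* Configure one ethdev port: MTU, RX/TX offloads, RSS, one TX queue per
 * lcore and the RX queues assigned to this port by the --config option.
 */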
2158 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
2160 struct rte_eth_dev_info dev_info;
2161 struct rte_eth_txconf *txconf;
2162 uint16_t nb_tx_queue, nb_rx_queue;
2163 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
2164 int32_t ret, socket_id;
2165 struct lcore_conf *qconf;
2166 struct rte_ether_addr ethaddr;
2167 struct rte_eth_conf local_port_conf = port_conf;
2169 ret = rte_eth_dev_info_get(portid, &dev_info);
2171 rte_exit(EXIT_FAILURE,
2172 "Error during getting device (port %u) info: %s\n",
2173 portid, strerror(-ret));
2175 /* limit allowed HW offloads, as user requested */
2176 dev_info.rx_offload_capa &= dev_rx_offload;
2177 dev_info.tx_offload_capa &= dev_tx_offload;
2179 printf("Configuring device port %u:\n", portid);
2181 ret = rte_eth_macaddr_get(portid, &ethaddr);
2183 rte_exit(EXIT_FAILURE,
2184 "Error getting MAC address (port %u): %s\n",
2185 portid, rte_strerror(-ret));
2187 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
2188 print_ethaddr("Address: ", &ethaddr);
2191 nb_rx_queue = get_port_nb_rx_queues(portid);
2192 nb_tx_queue = nb_lcores;
2194 if (nb_rx_queue > dev_info.max_rx_queues)
2195 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
2196 "(max rx queue is %u)\n",
2197 nb_rx_queue, dev_info.max_rx_queues);
2199 if (nb_tx_queue > dev_info.max_tx_queues)
2200 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
2201 "(max tx queue is %u)\n",
2202 nb_tx_queue, dev_info.max_tx_queues);
2204 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
2205 nb_rx_queue, nb_tx_queue);
2207 local_port_conf.rxmode.mtu = mtu_size;
2209 if (multi_seg_required()) {
2210 local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2211 local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2214 local_port_conf.rxmode.offloads |= req_rx_offloads;
2215 local_port_conf.txmode.offloads |= req_tx_offloads;
2217 /* Check that all required capabilities are supported */
2218 if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
2219 local_port_conf.rxmode.offloads)
2220 rte_exit(EXIT_FAILURE,
2221 "Error: port %u required RX offloads: 0x%" PRIx64
2222 ", avaialbe RX offloads: 0x%" PRIx64 "\n",
2223 portid, local_port_conf.rxmode.offloads,
2224 dev_info.rx_offload_capa);
2226 if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
2227 local_port_conf.txmode.offloads)
2228 rte_exit(EXIT_FAILURE,
2229 "Error: port %u required TX offloads: 0x%" PRIx64
2230 ", avaialbe TX offloads: 0x%" PRIx64 "\n",
2231 portid, local_port_conf.txmode.offloads,
2232 dev_info.tx_offload_capa);
2234 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
2235 local_port_conf.txmode.offloads |=
2236 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2238 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
2239 local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
2241 printf("port %u configuring rx_offloads=0x%" PRIx64
2242 ", tx_offloads=0x%" PRIx64 "\n",
2243 portid, local_port_conf.rxmode.offloads,
2244 local_port_conf.txmode.offloads);
2246 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2247 dev_info.flow_type_rss_offloads;
2248 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2249 port_conf.rx_adv_conf.rss_conf.rss_hf) {
2250 printf("Port %u modified RSS hash function based on hardware support,"
2251 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
2253 port_conf.rx_adv_conf.rss_conf.rss_hf,
2254 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2257 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2260 rte_exit(EXIT_FAILURE, "Cannot configure device: "
2261 "err=%d, port=%d\n", ret, portid);
2263 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2265 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2266 "err=%d, port=%d\n", ret, portid);
2268 /* init one TX queue per lcore */
2270 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2271 if (rte_lcore_is_enabled(lcore_id) == 0)
2275 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2280 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2282 txconf = &dev_info.default_txconf;
2283 txconf->offloads = local_port_conf.txmode.offloads;
2285 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2288 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2289 "err=%d, port=%d\n", ret, portid);
2291 qconf = &lcore_conf[lcore_id];
2292 qconf->tx_queue_id[portid] = tx_queueid;
2294 /* Pre-populate pkt offloads based on capabilities */
2295 qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
2296 qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
2297 if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
2298 qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
2302 /* init RX queues */
2303 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2304 struct rte_eth_rxconf rxq_conf;
2306 if (portid != qconf->rx_queue_list[queue].port_id)
2309 rx_queueid = qconf->rx_queue_list[queue].queue_id;
2311 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2314 rxq_conf = dev_info.default_rxconf;
2315 rxq_conf.offloads = local_port_conf.rxmode.offloads;
2316 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2317 nb_rxd, socket_id, &rxq_conf,
2318 socket_ctx[socket_id].mbuf_pool);
2320 rte_exit(EXIT_FAILURE,
2321 "rte_eth_rx_queue_setup: err=%d, "
2322 "port=%d\n", ret, portid);
2329 max_session_size(void)
2333 int16_t cdev_id, port_id, n;
2336 n = rte_cryptodev_count();
2337 for (cdev_id = 0; cdev_id != n; cdev_id++) {
2338 sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2342 * If crypto device is security capable, need to check the
2343 * size of security session as well.
2346 /* Get security context of the crypto device */
2347 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2348 if (sec_ctx == NULL)
2351 /* Get size of security session */
2352 sz = rte_security_session_get_size(sec_ctx);
2357 RTE_ETH_FOREACH_DEV(port_id) {
2358 if ((enabled_port_mask & (1 << port_id)) == 0)
2361 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2362 if (sec_ctx == NULL)
2365 sz = rte_security_session_get_size(sec_ctx);
2374 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2376 char mp_name[RTE_MEMPOOL_NAMESIZE];
2377 struct rte_mempool *sess_mp;
2380 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2381 "sess_mp_%u", socket_id);
2382 nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
2384 nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
2385 CDEV_MP_CACHE_MULTIPLIER);
2386 sess_mp = rte_cryptodev_sym_session_pool_create(
2387 mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
2389 ctx->session_pool = sess_mp;
2391 if (ctx->session_pool == NULL)
2392 rte_exit(EXIT_FAILURE,
2393 "Cannot init session pool on socket %d\n", socket_id);
2395 printf("Allocated session pool on socket %d\n", socket_id);
2399 session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
2402 char mp_name[RTE_MEMPOOL_NAMESIZE];
2403 struct rte_mempool *sess_mp;
2406 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2407 "sess_mp_priv_%u", socket_id);
2408 nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
2410 nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
2411 CDEV_MP_CACHE_MULTIPLIER);
2412 sess_mp = rte_mempool_create(mp_name,
2416 0, NULL, NULL, NULL,
2419 ctx->session_priv_pool = sess_mp;
2421 if (ctx->session_priv_pool == NULL)
2422 rte_exit(EXIT_FAILURE,
2423 "Cannot init session priv pool on socket %d\n",
2426 printf("Allocated session priv pool on socket %d\n",
2431 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
2436 snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
2437 ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
2438 MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
2439 frame_buf_size, socket_id);
2442 * if multi-segment support is enabled, then create a pool
2443 * for indirect mbufs.
2445 ms = multi_seg_required();
2447 snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2448 ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2449 MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2452 if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
2453 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2456 printf("Allocated mbuf pool on socket %d\n", socket_id);
2460 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2462 struct ipsec_sa *sa;
2464 /* For inline protocol processing, the metadata in the event will
2465 * uniquely identify the security session which raised the event.
2466 * Application would then need the userdata it had registered with the
2467 * security session to process the event.
2470 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2473 /* userdata could not be retrieved */
2477 /* Sequence number overflow. The SA needs to be re-established */
2483 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2484 void *param, void *ret_param)
2487 struct rte_eth_event_ipsec_desc *event_desc = NULL;
2488 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2489 rte_eth_dev_get_sec_ctx(port_id);
2491 RTE_SET_USED(param);
2493 if (type != RTE_ETH_EVENT_IPSEC)
2496 event_desc = ret_param;
2497 if (event_desc == NULL) {
2498 printf("Event descriptor not set\n");
2502 md = event_desc->metadata;
2504 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2505 return inline_ipsec_event_esn_overflow(ctx, md);
2506 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2507 printf("Invalid IPsec event reported\n");
2515 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2516 struct rte_mbuf *pkt[], uint16_t nb_pkts,
2517 __rte_unused uint16_t max_pkts, void *user_param)
2521 struct lcore_conf *lc;
2522 struct rte_mbuf *mb;
2523 struct rte_ether_hdr *eth;
2529 for (i = 0; i != nb_pkts; i++) {
2532 eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2533 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2535 struct rte_ipv4_hdr *iph;
2537 iph = (struct rte_ipv4_hdr *)(eth + 1);
2538 if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2540 mb->l2_len = sizeof(*eth);
2541 mb->l3_len = sizeof(*iph);
2542 tm = (tm != 0) ? tm : rte_rdtsc();
2543 mb = rte_ipv4_frag_reassemble_packet(
2544 lc->frag.tbl, &lc->frag.dr,
2548 /* fix the IP checksum after reassembly. */
2549 iph = rte_pktmbuf_mtod_offset(mb,
2550 struct rte_ipv4_hdr *,
2552 iph->hdr_checksum = 0;
2553 iph->hdr_checksum = rte_ipv4_cksum(iph);
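/*
 * rte_ipv4_frag_reassemble_packet() returns NULL while fragments are
 * still missing; once it hands back the reassembled mbuf, the IPv4
 * header carries a rewritten total length and fragment fields, hence
 * the checksum refresh above.
 */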
2556 } else if (eth->ether_type ==
2557 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2559 struct rte_ipv6_hdr *iph;
2560 struct ipv6_extension_fragment *fh;
2562 iph = (struct rte_ipv6_hdr *)(eth + 1);
2563 fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2565 mb->l2_len = sizeof(*eth);
2566 mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2568 tm = (tm != 0) ? tm : rte_rdtsc();
2569 mb = rte_ipv6_frag_reassemble_packet(
2570 lc->frag.tbl, &lc->frag.dr,
2573 /* fix l3_len after reassembly. */
2574 mb->l3_len = mb->l3_len - sizeof(*fh);
2582 /* some fragments were encountered, drain death row */
2584 rte_ip_frag_free_death_row(&lc->frag.dr, 0);
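/*
 * The reassembly library parks mbufs it can no longer use (timed-out or
 * superseded fragments) on a per-lcore death row; freeing them once per
 * burst keeps that cost out of the per-packet path.
 */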
2591 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2595 uint64_t frag_cycles;
2596 const struct lcore_rx_queue *rxq;
2597 const struct rte_eth_rxtx_callback *cb;
2599 /* create fragment table */
2600 sid = rte_lcore_to_socket_id(cid);
2601 frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
2602 NS_PER_S * frag_ttl_ns;
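/*
 * frag_cycles converts the fragment TTL from nanoseconds to TSC cycles
 * using a rounded-up cycles-per-ns factor. For example (illustrative
 * values only), a 2 GHz TSC and a 10 s TTL give 2 cycles/ns * 1e10 ns =
 * 2e10 cycles before an incomplete datagram is aged out.
 */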
2604 lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2605 FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2606 if (lc->frag.tbl == NULL) {
2607 printf("%s(%u): failed to create fragment table of size: %u, "
2609 __func__, cid, frag_tbl_sz, rte_errno);
2613 /* set up reassembly RX callbacks on all RX queues */
2614 for (i = 0; i != lc->nb_rx_queue; i++) {
2616 rxq = lc->rx_queue_list + i;
2617 cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2620 printf("%s(%u): failed to install RX callback for "
2621 "portid=%u, queueid=%u, error code: %d\n",
2623 rxq->port_id, rxq->queue_id, rte_errno);
2632 reassemble_init(void)
2638 for (i = 0; i != nb_lcore_params; i++) {
2639 lc = lcore_params[i].lcore_id;
2640 rc = reassemble_lcore_init(lcore_conf + lc, lc);
2649 create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
2651 struct rte_flow_action action[2];
2652 struct rte_flow_item pattern[2];
2653 struct rte_flow_attr attr = {0};
2654 struct rte_flow_error err;
2655 struct rte_flow *flow;
2658 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
2661 /* Add the default rte_flow to enable SECURITY for all ESP packets */
2663 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
2664 pattern[0].spec = NULL;
2665 pattern[0].mask = NULL;
2666 pattern[0].last = NULL;
2667 pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
2669 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
2670 action[0].conf = NULL;
2671 action[1].type = RTE_FLOW_ACTION_TYPE_END;
2672 action[1].conf = NULL;
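/*
 * A single ESP -> END pattern with a SECURITY action asks the PMD to run
 * its inline IPsec processing on every ESP packet received on this port;
 * no per-SA match fields are needed in this default rule.
 */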
2676 ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
2680 flow = rte_flow_create(port_id, &attr, pattern, action, &err);
2684 flow_info_tbl[port_id].rx_def_flow = flow;
2685 RTE_LOG(INFO, IPSEC,
2686 "Created default flow enabling SECURITY for all ESP traffic on port %d\n",
2691 signal_handler(int signum)
2693 if (signum == SIGINT || signum == SIGTERM) {
2694 printf("\n\nSignal %d received, preparing to exit...\n",
2701 ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
2703 struct rte_ipsec_session *ips;
2709 for (i = 0; i < nb_sa; i++) {
2710 ips = ipsec_get_primary_session(&sa[i]);
2711 if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
2712 rte_exit(EXIT_FAILURE, "Event mode supports only "
2713 "inline protocol sessions\n");
2719 check_event_mode_params(struct eh_conf *eh_conf)
2721 struct eventmode_conf *em_conf = NULL;
2722 struct lcore_params *params;
2725 if (!eh_conf || !eh_conf->mode_params)
2728 /* Get eventmode conf */
2729 em_conf = eh_conf->mode_params;
2731 if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
2732 em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
2733 printf("error: option --event-schedule-type applies only to "
2738 if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
2741 /* Set schedule type to ORDERED if it wasn't explicitly set by user */
2742 if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
2743 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
2746 * Event mode currently supports only inline protocol sessions.
2747 * If any other session type is configured, exit with an error.
2750 ev_mode_sess_verify(sa_in, nb_sa_in);
2751 ev_mode_sess_verify(sa_out, nb_sa_out);
2754 /* Option --config does not apply to event mode */
2755 if (nb_lcore_params > 0) {
2756 printf("error: option --config applies only to poll mode\n");
2761 * In order to use the same port_init routine for both poll and event
2762 * modes, initialize lcore_params with one queue for each eth port
2764 lcore_params = lcore_params_array;
2765 RTE_ETH_FOREACH_DEV(portid) {
2766 if ((enabled_port_mask & (1 << portid)) == 0)
2769 params = &lcore_params[nb_lcore_params++];
2770 params->port_id = portid;
2771 params->queue_id = 0;
2772 params->lcore_id = rte_get_next_lcore(0, 0, 1);
2779 inline_sessions_free(struct sa_ctx *sa_ctx)
2781 struct rte_ipsec_session *ips;
2782 struct ipsec_sa *sa;
2789 for (i = 0; i < sa_ctx->nb_sa; i++) {
2791 sa = &sa_ctx->sa[i];
2795 ips = ipsec_get_primary_session(sa);
2796 if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
2797 ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
2800 if (!rte_eth_dev_is_valid_port(sa->portid))
2803 ret = rte_security_session_destroy(
2804 rte_eth_dev_get_sec_ctx(sa->portid),
2807 RTE_LOG(ERR, IPSEC, "Failed to destroy security "
2808 "session type %d, spi %d\n",
2809 ips->type, sa->spi);
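/*
 * Rough per-socket mbuf budget: packets resident in every RX ring,
 * per-lcore TX bursts in flight, packets queued in every TX ring,
 * per-lcore mempool caches, descriptors outstanding on the crypto
 * queues, plus fragments parked in the per-lcore reassembly tables.
 */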
2814 calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
2817 return RTE_MAX((nb_rxq * nb_rxd +
2818 nb_ports * nb_lcores * MAX_PKT_BURST +
2819 nb_ports * nb_txq * nb_txd +
2820 nb_lcores * MEMPOOL_CACHE_SIZE +
2821 nb_crypto_qp * CDEV_QUEUE_DESC +
2822 nb_lcores * frag_tbl_sz *
2823 FRAG_TBL_BUCKET_ENTRIES),
2828 main(int32_t argc, char **argv)
2831 uint32_t lcore_id, nb_txq, nb_rxq = 0;
2835 uint16_t portid, nb_crypto_qp, nb_ports = 0;
2836 uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
2837 uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
2838 struct eh_conf *eh_conf = NULL;
2841 nb_bufs_in_pool = 0;
2844 ret = rte_eal_init(argc, argv);
2846 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2851 signal(SIGINT, signal_handler);
2852 signal(SIGTERM, signal_handler);
2854 /* initialize event helper configuration */
2855 eh_conf = eh_conf_init();
2856 if (eh_conf == NULL)
2857 rte_exit(EXIT_FAILURE, "Failed to init event helper config\n");
2859 /* parse application arguments (after the EAL ones) */
2860 ret = parse_args(argc, argv, eh_conf);
2862 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2864 /* parse configuration file */
2865 if (parse_cfg_file(cfgfile) < 0) {
2866 printf("parsing file \"%s\" failed\n",
2868 print_usage(argv[0]);
2872 if ((unprotected_port_mask & enabled_port_mask) !=
2873 unprotected_port_mask)
2874 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2875 unprotected_port_mask);
2877 if (check_poll_mode_params(eh_conf) < 0)
2878 rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
2880 if (check_event_mode_params(eh_conf) < 0)
2881 rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");
2883 ret = init_lcore_rx_queues();
2885 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2887 nb_lcores = rte_lcore_count();
2889 sess_sz = max_session_size();
2892 * In event mode, request that the number of crypto queues reserved
2893 * be at least equal to the number of ports.
2895 if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
2896 nb_crypto_qp = rte_eth_dev_count_avail();
2900 nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
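/* The returned queue-pair count feeds the mbuf budget computed below. */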
2902 if (nb_bufs_in_pool == 0) {
2903 RTE_ETH_FOREACH_DEV(portid) {
2904 if ((enabled_port_mask & (1 << portid)) == 0)
2907 nb_rxq += get_port_nb_rx_queues(portid);
2912 nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
2916 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2917 if (rte_lcore_is_enabled(lcore_id) == 0)
2921 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2925 /* mbuf_pool is initialised by the pool_init() function */
2926 if (socket_ctx[socket_id].mbuf_pool)
2929 pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
2930 session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
2931 session_priv_pool_init(&socket_ctx[socket_id], socket_id,
2934 printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
2936 RTE_ETH_FOREACH_DEV(portid) {
2937 if ((enabled_port_mask & (1 << portid)) == 0)
2940 sa_check_offloads(portid, &req_rx_offloads[portid],
2941 &req_tx_offloads[portid]);
2942 port_init(portid, req_rx_offloads[portid],
2943 req_tx_offloads[portid]);
2947 * Set the enabled port mask in the helper config for use by the
2948 * helper sub-system. This will be used while initializing devices
2949 * through the helper sub-system.
2951 eh_conf->eth_portmask = enabled_port_mask;
2953 /* Initialize eventmode components */
2954 ret = eh_devs_init(eh_conf);
2956 rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
2959 RTE_ETH_FOREACH_DEV(portid) {
2960 if ((enabled_port_mask & (1 << portid)) == 0)
2963 /* Create flow before starting the device */
2964 create_default_ipsec_flow(portid, req_rx_offloads[portid]);
2966 ret = rte_eth_dev_start(portid);
2968 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
2969 "err=%d, port=%d\n", ret, portid);
2971 * If enabled, put the device in promiscuous mode.
2972 * This allows IO forwarding mode to forward packets
2973 * to itself through 2 cross-connected ports of the target machine.
2976 if (promiscuous_on) {
2977 ret = rte_eth_promiscuous_enable(portid);
2979 rte_exit(EXIT_FAILURE,
2980 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
2981 rte_strerror(-ret), portid);
2984 rte_eth_dev_callback_register(portid,
2985 RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
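/*
 * Registering for RTE_ETH_EVENT_IPSEC lets inline_ipsec_event_callback()
 * receive per-port inline IPsec notifications such as the ESN overflow
 * handled above.
 */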
2988 /* fragment reassembly is enabled */
2989 if (frag_tbl_sz != 0) {
2990 ret = reassemble_init();
2992 rte_exit(EXIT_FAILURE, "failed at reassembly init\n");
2995 /* Replicate each context per socket */
2996 for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
2997 socket_id = rte_socket_id_by_idx(i);
2998 if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
2999 (socket_ctx[socket_id].sa_in == NULL) &&
3000 (socket_ctx[socket_id].sa_out == NULL)) {
3001 sa_init(&socket_ctx[socket_id], socket_id);
3002 sp4_init(&socket_ctx[socket_id], socket_id);
3003 sp6_init(&socket_ctx[socket_id], socket_id);
3004 rt_init(&socket_ctx[socket_id], socket_id);
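/*
 * sa_init/sp4_init/sp6_init/rt_init populate this socket's copies of the
 * SA database, the IPv4/IPv6 security policies and the routing table, so
 * lookups by lcores on this socket can stay NUMA-local.
 */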
3010 check_all_ports_link_status(enabled_port_mask);
3012 #if (STATS_INTERVAL > 0)
3013 rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
3015 RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
3016 #endif /* STATS_INTERVAL */
3018 /* launch per-lcore init on every lcore */
3019 rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
3020 RTE_LCORE_FOREACH_WORKER(lcore_id) {
3021 if (rte_eal_wait_lcore(lcore_id) < 0)
3025 /* Uninitialize eventmode components */
3026 ret = eh_devs_uninit(eh_conf);
3028 rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);
3030 /* Free eventmode configuration memory */
3031 eh_conf_uninit(eh_conf);
3033 /* Destroy inline inbound and outbound sessions */
3034 for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3035 socket_id = rte_socket_id_by_idx(i);
3036 inline_sessions_free(socket_ctx[socket_id].sa_in);
3037 inline_sessions_free(socket_ctx[socket_id].sa_out);
3040 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
3041 printf("Closing cryptodev %d...", cdev_id);
3042 rte_cryptodev_stop(cdev_id);
3043 rte_cryptodev_close(cdev_id);
3047 RTE_ETH_FOREACH_DEV(portid) {
3048 if ((enabled_port_mask & (1 << portid)) == 0)
3051 printf("Closing port %d...", portid);
3052 if (flow_info_tbl[portid].rx_def_flow) {
3053 struct rte_flow_error err;
3055 ret = rte_flow_destroy(portid,
3056 flow_info_tbl[portid].rx_def_flow, &err);
3058 RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
3059 "for port %u, err msg: %s\n", portid,
3062 ret = rte_eth_dev_stop(portid);
3065 "rte_eth_dev_stop: err=%s, port=%u\n",
3066 rte_strerror(-ret), portid);
3068 rte_eth_dev_close(portid);
3072 /* clean up the EAL */