1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <sys/types.h>
11 #include <netinet/in.h>
12 #include <netinet/ip.h>
13 #include <netinet/ip6.h>
15 #include <sys/queue.h>
21 #include <rte_common.h>
22 #include <rte_bitmap.h>
23 #include <rte_byteorder.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
43 #include <rte_jhash.h>
44 #include <rte_cryptodev.h>
45 #include <rte_security.h>
46 #include <rte_eventdev.h>
48 #include <rte_ip_frag.h>
49 #include <rte_alarm.h>
50 #include <rte_telemetry.h>
52 #include "event_helper.h"
55 #include "ipsec_worker.h"
59 volatile bool force_quit;
61 #define MAX_JUMBO_PKT_LEN 9600
63 #define MEMPOOL_CACHE_SIZE 256
65 #define CDEV_QUEUE_DESC 2048
66 #define CDEV_MAP_ENTRIES 16384
67 #define CDEV_MP_CACHE_SZ 64
68 #define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
69 #define MAX_QUEUE_PAIRS 1
71 #define MAX_LCORE_PARAMS 1024
74 * Configurable number of RX/TX ring descriptors
76 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
77 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
78 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
79 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
81 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
82 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
83 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
84 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
87 #define FRAG_TBL_BUCKET_ENTRIES 4
88 #define MAX_FRAG_TTL_NS (10LL * NS_PER_S)
90 #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
92 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
93 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
94 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
95 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
96 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
99 struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
101 #define CMD_LINE_OPT_CONFIG "config"
102 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
103 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
104 #define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode"
105 #define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type"
106 #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
107 #define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
108 #define CMD_LINE_OPT_REASSEMBLE "reassemble"
109 #define CMD_LINE_OPT_MTU "mtu"
110 #define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
111 #define CMD_LINE_OPT_EVENT_VECTOR "event-vector"
112 #define CMD_LINE_OPT_VECTOR_SIZE "vector-size"
113 #define CMD_LINE_OPT_VECTOR_TIMEOUT "vector-tmo"
114 #define CMD_LINE_OPT_VECTOR_POOL_SZ "vector-pool-sz"
115 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
117 #define CMD_LINE_ARG_EVENT "event"
118 #define CMD_LINE_ARG_POLL "poll"
119 #define CMD_LINE_ARG_ORDERED "ordered"
120 #define CMD_LINE_ARG_ATOMIC "atomic"
121 #define CMD_LINE_ARG_PARALLEL "parallel"
124 /* long options mapped to a short option */
126 /* first long only option value must be >= 256, so that we won't
127 * conflict with short options
129 CMD_LINE_OPT_MIN_NUM = 256,
130 CMD_LINE_OPT_CONFIG_NUM,
131 CMD_LINE_OPT_SINGLE_SA_NUM,
132 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
133 CMD_LINE_OPT_TRANSFER_MODE_NUM,
134 CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
135 CMD_LINE_OPT_RX_OFFLOAD_NUM,
136 CMD_LINE_OPT_TX_OFFLOAD_NUM,
137 CMD_LINE_OPT_REASSEMBLE_NUM,
138 CMD_LINE_OPT_MTU_NUM,
139 CMD_LINE_OPT_FRAG_TTL_NUM,
140 CMD_LINE_OPT_EVENT_VECTOR_NUM,
141 CMD_LINE_OPT_VECTOR_SIZE_NUM,
142 CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
143 CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
144 CMD_LINE_OPT_PER_PORT_POOL_NUM,
147 static const struct option lgopts[] = {
148 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
149 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
150 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
151 {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
152 {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
153 {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
154 {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
155 {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
156 {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
157 {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
158 {CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
159 {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
160 {CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
161 {CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
162 {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
166 uint32_t unprotected_port_mask;
167 uint32_t single_sa_idx;
168 /* mask of enabled ports */
169 static uint32_t enabled_port_mask;
170 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
171 static int32_t promiscuous_on = 1;
172 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
173 static uint32_t nb_lcores;
175 uint32_t nb_bufs_in_pool;
178 * RX/TX HW offload capabilities to enable/use on ethernet ports.
179 * By default all capabilities are enabled.
181 static uint64_t dev_rx_offload = UINT64_MAX;
182 static uint64_t dev_tx_offload = UINT64_MAX;
185 * global values that determine multi-seg policy
187 uint32_t frag_tbl_sz;
188 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
189 uint32_t mtu_size = RTE_ETHER_MTU;
190 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
191 static uint32_t stats_interval;
193 /* application wide librte_ipsec/SA parameters */
194 struct app_sa_prm app_sa_prm = {
196 .cache_sz = SA_CACHE_SZ,
199 static const char *cfgfile;
201 struct lcore_params {
205 } __rte_cache_aligned;
207 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
209 static struct lcore_params *lcore_params;
210 static uint16_t nb_lcore_params;
212 static struct rte_hash *cdev_map_in;
213 static struct rte_hash *cdev_map_out;
215 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
217 static struct rte_eth_conf port_conf = {
219 .mq_mode = RTE_ETH_MQ_RX_RSS,
221 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
226 .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
227 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
231 .mq_mode = RTE_ETH_MQ_TX_NONE,
235 struct socket_ctx socket_ctx[NB_SOCKETS];
241 * Determine is multi-segment support required:
242 * - either frame buffer size is smaller then mtu
243 * - or reassemble support is requested
246 multi_seg_required(void)
248 return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
249 frame_buf_size || frag_tbl_sz != 0);
253 struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
255 /* Print out statistics on packet distribution */
257 print_stats_cb(__rte_unused void *param)
259 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
260 float burst_percent, rx_per_call, tx_per_call;
263 total_packets_dropped = 0;
264 total_packets_tx = 0;
265 total_packets_rx = 0;
267 const char clr[] = { 27, '[', '2', 'J', '\0' };
268 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
270 /* Clear screen and move to top left */
271 printf("%s%s", clr, topLeft);
273 printf("\nCore statistics ====================================");
275 for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
276 /* skip disabled cores */
277 if (rte_lcore_is_enabled(coreid) == 0)
279 burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
280 core_statistics[coreid].rx;
281 rx_per_call = (float)(core_statistics[coreid].rx)/
282 core_statistics[coreid].rx_call;
283 tx_per_call = (float)(core_statistics[coreid].tx)/
284 core_statistics[coreid].tx_call;
285 printf("\nStatistics for core %u ------------------------------"
286 "\nPackets received: %20"PRIu64
287 "\nPackets sent: %24"PRIu64
288 "\nPackets dropped: %21"PRIu64
289 "\nBurst percent: %23.2f"
290 "\nPackets per Rx call: %17.2f"
291 "\nPackets per Tx call: %17.2f",
293 core_statistics[coreid].rx,
294 core_statistics[coreid].tx,
295 core_statistics[coreid].dropped,
300 total_packets_dropped += core_statistics[coreid].dropped;
301 total_packets_tx += core_statistics[coreid].tx;
302 total_packets_rx += core_statistics[coreid].rx;
304 printf("\nAggregate statistics ==============================="
305 "\nTotal packets received: %14"PRIu64
306 "\nTotal packets sent: %18"PRIu64
307 "\nTotal packets dropped: %15"PRIu64,
310 total_packets_dropped);
311 printf("\n====================================================\n");
313 rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
317 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
326 for (i = 0; i < num; i++) {
329 ip = rte_pktmbuf_mtod(m, struct ip *);
331 if (ip->ip_v == IPVERSION) {
332 trf->ip4.pkts[n4] = m;
333 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
334 uint8_t *, offsetof(struct ip, ip_p));
336 } else if (ip->ip_v == IP6_VERSION) {
337 trf->ip6.pkts[n6] = m;
338 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
340 offsetof(struct ip6_hdr, ip6_nxt));
352 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
353 struct ipsec_traffic *traffic)
355 unsigned int lcoreid = rte_lcore_id();
356 uint16_t nb_pkts_in, n_ip4, n_ip6;
358 n_ip4 = traffic->ip4.num;
359 n_ip6 = traffic->ip6.num;
361 if (app_sa_prm.enable == 0) {
362 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
363 traffic->ipsec.num, MAX_PKT_BURST);
364 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
366 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
367 traffic->ipsec.saptr, traffic->ipsec.num);
368 ipsec_process(ipsec_ctx, traffic);
371 inbound_sp_sa(ipsec_ctx->sp4_ctx,
372 ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
373 &core_statistics[lcoreid].inbound.spd4);
375 inbound_sp_sa(ipsec_ctx->sp6_ctx,
376 ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
377 &core_statistics[lcoreid].inbound.spd6);
381 outbound_spd_lookup(struct sp_ctx *sp,
382 struct traffic_type *ip,
383 struct traffic_type *ipsec,
384 struct ipsec_spd_stats *stats)
387 uint32_t i, j, sa_idx;
389 if (ip->num == 0 || sp == NULL)
392 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
393 ip->num, DEFAULT_MAX_CATEGORIES);
395 for (i = 0, j = 0; i < ip->num; i++) {
397 sa_idx = ip->res[i] - 1;
399 if (unlikely(ip->res[i] == DISCARD)) {
403 } else if (unlikely(ip->res[i] == BYPASS)) {
408 ipsec->res[ipsec->num] = sa_idx;
409 ipsec->pkts[ipsec->num++] = m;
418 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
419 struct ipsec_traffic *traffic)
422 uint16_t idx, nb_pkts_out, i;
423 unsigned int lcoreid = rte_lcore_id();
425 /* Drop any IPsec traffic from protected ports */
426 free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
428 traffic->ipsec.num = 0;
430 outbound_spd_lookup(ipsec_ctx->sp4_ctx,
431 &traffic->ip4, &traffic->ipsec,
432 &core_statistics[lcoreid].outbound.spd4);
434 outbound_spd_lookup(ipsec_ctx->sp6_ctx,
435 &traffic->ip6, &traffic->ipsec,
436 &core_statistics[lcoreid].outbound.spd6);
438 if (app_sa_prm.enable == 0) {
440 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
441 traffic->ipsec.res, traffic->ipsec.num,
444 for (i = 0; i < nb_pkts_out; i++) {
445 m = traffic->ipsec.pkts[i];
446 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
447 if (ip->ip_v == IPVERSION) {
448 idx = traffic->ip4.num++;
449 traffic->ip4.pkts[idx] = m;
451 idx = traffic->ip6.num++;
452 traffic->ip6.pkts[idx] = m;
456 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
457 traffic->ipsec.saptr, traffic->ipsec.num);
458 ipsec_process(ipsec_ctx, traffic);
463 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
464 struct ipsec_traffic *traffic)
467 uint32_t nb_pkts_in, i, idx;
469 if (app_sa_prm.enable == 0) {
471 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
472 traffic->ipsec.num, MAX_PKT_BURST);
474 for (i = 0; i < nb_pkts_in; i++) {
475 m = traffic->ipsec.pkts[i];
476 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
477 if (ip->ip_v == IPVERSION) {
478 idx = traffic->ip4.num++;
479 traffic->ip4.pkts[idx] = m;
481 idx = traffic->ip6.num++;
482 traffic->ip6.pkts[idx] = m;
486 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
487 traffic->ipsec.saptr, traffic->ipsec.num);
488 ipsec_process(ipsec_ctx, traffic);
493 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
494 struct ipsec_traffic *traffic)
497 uint32_t nb_pkts_out, i, n;
500 /* Drop any IPsec traffic from protected ports */
501 free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
505 for (i = 0; i < traffic->ip4.num; i++) {
506 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
507 traffic->ipsec.res[n++] = single_sa_idx;
510 for (i = 0; i < traffic->ip6.num; i++) {
511 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
512 traffic->ipsec.res[n++] = single_sa_idx;
515 traffic->ip4.num = 0;
516 traffic->ip6.num = 0;
517 traffic->ipsec.num = n;
519 if (app_sa_prm.enable == 0) {
521 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
522 traffic->ipsec.res, traffic->ipsec.num,
525 /* They all sue the same SA (ip4 or ip6 tunnel) */
526 m = traffic->ipsec.pkts[0];
527 ip = rte_pktmbuf_mtod(m, struct ip *);
528 if (ip->ip_v == IPVERSION) {
529 traffic->ip4.num = nb_pkts_out;
530 for (i = 0; i < nb_pkts_out; i++)
531 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
533 traffic->ip6.num = nb_pkts_out;
534 for (i = 0; i < nb_pkts_out; i++)
535 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
538 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
539 traffic->ipsec.saptr, traffic->ipsec.num);
540 ipsec_process(ipsec_ctx, traffic);
545 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
546 uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
548 struct ipsec_traffic traffic;
550 prepare_traffic(ctx, pkts, &traffic, nb_pkts);
552 if (unlikely(single_sa)) {
553 if (is_unprotected_port(portid))
554 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
556 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
558 if (is_unprotected_port(portid))
559 process_pkts_inbound(&qconf->inbound, &traffic);
561 process_pkts_outbound(&qconf->outbound, &traffic);
564 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num,
565 qconf->outbound.ipv4_offloads, true);
566 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
570 drain_crypto_buffers(struct lcore_conf *qconf)
573 struct ipsec_ctx *ctx;
575 /* drain inbound buffers*/
576 ctx = &qconf->inbound;
577 for (i = 0; i != ctx->nb_qps; i++) {
578 if (ctx->tbl[i].len != 0)
579 enqueue_cop_burst(ctx->tbl + i);
582 /* drain outbound buffers*/
583 ctx = &qconf->outbound;
584 for (i = 0; i != ctx->nb_qps; i++) {
585 if (ctx->tbl[i].len != 0)
586 enqueue_cop_burst(ctx->tbl + i);
591 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
592 struct ipsec_ctx *ctx)
595 struct ipsec_traffic trf;
596 unsigned int lcoreid = rte_lcore_id();
598 if (app_sa_prm.enable == 0) {
600 /* dequeue packets from crypto-queue */
601 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
602 RTE_DIM(trf.ipsec.pkts));
607 /* split traffic by ipv4-ipv6 */
608 split46_traffic(&trf, trf.ipsec.pkts, n);
610 ipsec_cqp_process(ctx, &trf);
612 /* process ipv4 packets */
613 if (trf.ip4.num != 0) {
614 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
615 &core_statistics[lcoreid].inbound.spd4);
616 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
617 qconf->outbound.ipv4_offloads, true);
620 /* process ipv6 packets */
621 if (trf.ip6.num != 0) {
622 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
623 &core_statistics[lcoreid].inbound.spd6);
624 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
629 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
630 struct ipsec_ctx *ctx)
633 struct ipsec_traffic trf;
635 if (app_sa_prm.enable == 0) {
637 /* dequeue packets from crypto-queue */
638 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
639 RTE_DIM(trf.ipsec.pkts));
644 /* split traffic by ipv4-ipv6 */
645 split46_traffic(&trf, trf.ipsec.pkts, n);
647 ipsec_cqp_process(ctx, &trf);
649 /* process ipv4 packets */
650 if (trf.ip4.num != 0)
651 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num,
652 qconf->outbound.ipv4_offloads, true);
654 /* process ipv6 packets */
655 if (trf.ip6.num != 0)
656 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
659 /* main processing loop */
661 ipsec_poll_mode_worker(void)
663 struct rte_mbuf *pkts[MAX_PKT_BURST];
665 uint64_t prev_tsc, diff_tsc, cur_tsc;
669 struct lcore_conf *qconf;
670 int32_t rc, socket_id;
671 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
672 / US_PER_S * BURST_TX_DRAIN_US;
673 struct lcore_rx_queue *rxql;
676 lcore_id = rte_lcore_id();
677 qconf = &lcore_conf[lcore_id];
678 rxql = qconf->rx_queue_list;
679 socket_id = rte_lcore_to_socket_id(lcore_id);
681 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
682 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
683 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
684 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
685 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
686 qconf->inbound.cdev_map = cdev_map_in;
687 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
688 qconf->inbound.session_priv_pool =
689 socket_ctx[socket_id].session_priv_pool;
690 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
691 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
692 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
693 qconf->outbound.cdev_map = cdev_map_out;
694 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
695 qconf->outbound.session_priv_pool =
696 socket_ctx[socket_id].session_priv_pool;
697 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
699 rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
702 "SAD cache init on lcore %u, failed with code: %d\n",
707 if (qconf->nb_rx_queue == 0) {
708 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
713 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
715 for (i = 0; i < qconf->nb_rx_queue; i++) {
716 portid = rxql[i].port_id;
717 queueid = rxql[i].queue_id;
719 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
720 lcore_id, portid, queueid);
723 while (!force_quit) {
724 cur_tsc = rte_rdtsc();
726 /* TX queue buffer drain */
727 diff_tsc = cur_tsc - prev_tsc;
729 if (unlikely(diff_tsc > drain_tsc)) {
730 drain_tx_buffers(qconf);
731 drain_crypto_buffers(qconf);
735 for (i = 0; i < qconf->nb_rx_queue; ++i) {
737 /* Read packets from RX queues */
738 portid = rxql[i].port_id;
739 queueid = rxql[i].queue_id;
740 nb_rx = rte_eth_rx_burst(portid, queueid,
741 pkts, MAX_PKT_BURST);
744 core_stats_update_rx(nb_rx);
745 process_pkts(qconf, pkts, nb_rx, portid,
749 /* dequeue and process completed crypto-ops */
750 if (is_unprotected_port(portid))
751 drain_inbound_crypto_queues(qconf,
754 drain_outbound_crypto_queues(qconf,
761 check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
767 for (i = 0; i < nb_lcore_params; ++i) {
768 portid = lcore_params_array[i].port_id;
769 if (portid == fdir_portid) {
770 queueid = lcore_params_array[i].queue_id;
771 if (queueid == fdir_qid)
775 if (i == nb_lcore_params - 1)
783 check_poll_mode_params(struct eh_conf *eh_conf)
793 if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
796 if (lcore_params == NULL) {
797 printf("Error: No port/queue/core mappings\n");
801 for (i = 0; i < nb_lcore_params; ++i) {
802 lcore = lcore_params[i].lcore_id;
803 if (!rte_lcore_is_enabled(lcore)) {
804 printf("error: lcore %hhu is not enabled in "
805 "lcore mask\n", lcore);
808 socket_id = rte_lcore_to_socket_id(lcore);
809 if (socket_id != 0 && numa_on == 0) {
810 printf("warning: lcore %hhu is on socket %d "
814 portid = lcore_params[i].port_id;
815 if ((enabled_port_mask & (1 << portid)) == 0) {
816 printf("port %u is not enabled in port mask\n", portid);
819 if (!rte_eth_dev_is_valid_port(portid)) {
820 printf("port %u is not present on the board\n", portid);
828 get_port_nb_rx_queues(const uint16_t port)
833 for (i = 0; i < nb_lcore_params; ++i) {
834 if (lcore_params[i].port_id == port &&
835 lcore_params[i].queue_id > queue)
836 queue = lcore_params[i].queue_id;
838 return (uint8_t)(++queue);
842 init_lcore_rx_queues(void)
844 uint16_t i, nb_rx_queue;
847 for (i = 0; i < nb_lcore_params; ++i) {
848 lcore = lcore_params[i].lcore_id;
849 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
850 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
851 printf("error: too many queues (%u) for lcore: %u\n",
852 nb_rx_queue + 1, lcore);
855 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
856 lcore_params[i].port_id;
857 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
858 lcore_params[i].queue_id;
859 lcore_conf[lcore].nb_rx_queue++;
866 print_usage(const char *prgname)
868 fprintf(stderr, "%s [EAL options] --"
874 " [-w REPLAY_WINDOW_SIZE]"
878 " [-t STATS_INTERVAL]"
879 " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
881 " --config (port,queue,lcore)[,(port,queue,lcore)]"
882 " [--single-sa SAIDX]"
883 " [--cryptodev_mask MASK]"
884 " [--transfer-mode MODE]"
885 " [--event-schedule-type TYPE]"
886 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
887 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
888 " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
889 " [--" CMD_LINE_OPT_MTU " MTU]"
891 " [--vector-size SIZE]"
892 " [--vector-tmo TIMEOUT in ns]"
894 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
895 " -P : Enable promiscuous mode\n"
896 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
897 " -j FRAMESIZE: Data buffer size, minimum (and default)\n"
898 " value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
899 " -l enables code-path that uses librte_ipsec\n"
900 " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
901 " size for each SA\n"
903 " -a enables SA SQN atomic behaviour\n"
904 " -c specifies inbound SAD cache size,\n"
905 " zero value disables the cache (default value: 128)\n"
906 " -t specifies statistics screen update interval,\n"
907 " zero disables statistics screen (default value: 0)\n"
908 " -s number of mbufs in packet pool, if not specified number\n"
909 " of mbufs will be calculated based on number of cores,\n"
910 " ports and crypto queues\n"
911 " -f CONFIG_FILE: Configuration file\n"
912 " --config (port,queue,lcore): Rx queue configuration. In poll\n"
913 " mode determines which queues from\n"
914 " which ports are mapped to which cores.\n"
915 " In event mode this option is not used\n"
916 " as packets are dynamically scheduled\n"
918 " --single-sa SAIDX: In poll mode use single SA index for\n"
919 " outbound traffic, bypassing the SP\n"
920 " In event mode selects driver submode,\n"
921 " SA index value is ignored\n"
922 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
923 " devices to configure\n"
924 " --transfer-mode MODE\n"
925 " \"poll\" : Packet transfer via polling (default)\n"
926 " \"event\" : Packet transfer via event device\n"
927 " --event-schedule-type TYPE queue schedule type, used only when\n"
928 " transfer mode is set to event\n"
929 " \"ordered\" : Ordered (default)\n"
930 " \"atomic\" : Atomic\n"
931 " \"parallel\" : Parallel\n"
932 " --" CMD_LINE_OPT_RX_OFFLOAD
933 ": bitmask of the RX HW offload capabilities to enable/use\n"
934 " (RTE_ETH_RX_OFFLOAD_*)\n"
935 " --" CMD_LINE_OPT_TX_OFFLOAD
936 ": bitmask of the TX HW offload capabilities to enable/use\n"
937 " (RTE_ETH_TX_OFFLOAD_*)\n"
938 " --" CMD_LINE_OPT_REASSEMBLE " NUM"
939 ": max number of entries in reassemble(fragment) table\n"
940 " (zero (default value) disables reassembly)\n"
941 " --" CMD_LINE_OPT_MTU " MTU"
942 ": MTU value on all ports (default value: 1500)\n"
943 " outgoing packets with bigger size will be fragmented\n"
944 " incoming packets with bigger size will be discarded\n"
945 " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
946 ": fragments lifetime in nanoseconds, default\n"
947 " and maximum value is 10.000.000.000 ns (10 s)\n"
948 " --event-vector enables event vectorization\n"
949 " --vector-size Max vector size (default value: 16)\n"
950 " --vector-tmo Max vector timeout in nanoseconds"
951 " (default value: 102400)\n"
952 " --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
953 " --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
954 " (default value is based on mbuf count)\n"
960 parse_mask(const char *str, uint64_t *val)
966 t = strtoul(str, &end, 0);
967 if (errno != 0 || end[0] != 0)
975 parse_portmask(const char *portmask)
982 /* parse hexadecimal string */
983 pm = strtoul(portmask, &end, 16);
984 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
987 if ((pm == 0) && errno)
994 parse_decimal(const char *str)
999 num = strtoull(str, &end, 10);
1000 if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
1008 parse_config(const char *q_arg)
1011 const char *p, *p0 = q_arg;
1019 unsigned long int_fld[_NUM_FLD];
1020 char *str_fld[_NUM_FLD];
1024 nb_lcore_params = 0;
1026 while ((p = strchr(p0, '(')) != NULL) {
1028 p0 = strchr(p, ')');
1033 if (size >= sizeof(s))
1036 snprintf(s, sizeof(s), "%.*s", size, p);
1037 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1040 for (i = 0; i < _NUM_FLD; i++) {
1042 int_fld[i] = strtoul(str_fld[i], &end, 0);
1043 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1046 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1047 printf("exceeded max number of lcore params: %hu\n",
1051 lcore_params_array[nb_lcore_params].port_id =
1052 (uint8_t)int_fld[FLD_PORT];
1053 lcore_params_array[nb_lcore_params].queue_id =
1054 (uint8_t)int_fld[FLD_QUEUE];
1055 lcore_params_array[nb_lcore_params].lcore_id =
1056 (uint8_t)int_fld[FLD_LCORE];
1059 lcore_params = lcore_params_array;
1064 print_app_sa_prm(const struct app_sa_prm *prm)
1066 printf("librte_ipsec usage: %s\n",
1067 (prm->enable == 0) ? "disabled" : "enabled");
1069 printf("replay window size: %u\n", prm->window_size);
1070 printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1071 printf("SA flags: %#" PRIx64 "\n", prm->flags);
1072 printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
1076 parse_transfer_mode(struct eh_conf *conf, const char *optarg)
1078 if (!strcmp(CMD_LINE_ARG_POLL, optarg))
1079 conf->mode = EH_PKT_TRANSFER_MODE_POLL;
1080 else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
1081 conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
1083 printf("Unsupported packet transfer mode\n");
1091 parse_schedule_type(struct eh_conf *conf, const char *optarg)
1093 struct eventmode_conf *em_conf = NULL;
1095 /* Get eventmode conf */
1096 em_conf = conf->mode_params;
1098 if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
1099 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
1100 else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
1101 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
1102 else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
1103 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
1105 printf("Unsupported queue schedule type\n");
1113 parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
1118 int32_t option_index;
1119 char *prgname = argv[0];
1120 int32_t f_present = 0;
1121 struct eventmode_conf *em_conf = NULL;
1125 while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:",
1126 lgopts, &option_index)) != EOF) {
1130 enabled_port_mask = parse_portmask(optarg);
1131 if (enabled_port_mask == 0) {
1132 printf("invalid portmask\n");
1133 print_usage(prgname);
1138 printf("Promiscuous mode selected\n");
1142 unprotected_port_mask = parse_portmask(optarg);
1143 if (unprotected_port_mask == 0) {
1144 printf("invalid unprotected portmask\n");
1145 print_usage(prgname);
1150 if (f_present == 1) {
1151 printf("\"-f\" option present more than "
1153 print_usage(prgname);
1161 ret = parse_decimal(optarg);
1163 printf("Invalid number of buffers in a pool: "
1165 print_usage(prgname);
1169 nb_bufs_in_pool = ret;
1173 ret = parse_decimal(optarg);
1174 if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1176 printf("Invalid frame buffer size value: %s\n",
1178 print_usage(prgname);
1181 frame_buf_size = ret;
1182 printf("Custom frame buffer size %u\n", frame_buf_size);
1185 app_sa_prm.enable = 1;
1188 app_sa_prm.window_size = parse_decimal(optarg);
1191 app_sa_prm.enable_esn = 1;
1194 app_sa_prm.enable = 1;
1195 app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1198 ret = parse_decimal(optarg);
1200 printf("Invalid SA cache size: %s\n", optarg);
1201 print_usage(prgname);
1204 app_sa_prm.cache_sz = ret;
1207 ret = parse_decimal(optarg);
1209 printf("Invalid interval value: %s\n", optarg);
1210 print_usage(prgname);
1213 stats_interval = ret;
1215 case CMD_LINE_OPT_CONFIG_NUM:
1216 ret = parse_config(optarg);
1218 printf("Invalid config\n");
1219 print_usage(prgname);
1223 case CMD_LINE_OPT_SINGLE_SA_NUM:
1224 ret = parse_decimal(optarg);
1225 if (ret == -1 || ret > UINT32_MAX) {
1226 printf("Invalid argument[sa_idx]\n");
1227 print_usage(prgname);
1233 single_sa_idx = ret;
1234 eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1236 printf("Configured with single SA index %u\n",
1239 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1240 ret = parse_portmask(optarg);
1242 printf("Invalid argument[portmask]\n");
1243 print_usage(prgname);
1248 enabled_cryptodev_mask = ret;
1251 case CMD_LINE_OPT_TRANSFER_MODE_NUM:
1252 ret = parse_transfer_mode(eh_conf, optarg);
1254 printf("Invalid packet transfer mode\n");
1255 print_usage(prgname);
1260 case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
1261 ret = parse_schedule_type(eh_conf, optarg);
1263 printf("Invalid queue schedule type\n");
1264 print_usage(prgname);
1269 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1270 ret = parse_mask(optarg, &dev_rx_offload);
1272 printf("Invalid argument for \'%s\': %s\n",
1273 CMD_LINE_OPT_RX_OFFLOAD, optarg);
1274 print_usage(prgname);
1278 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1279 ret = parse_mask(optarg, &dev_tx_offload);
1281 printf("Invalid argument for \'%s\': %s\n",
1282 CMD_LINE_OPT_TX_OFFLOAD, optarg);
1283 print_usage(prgname);
1287 case CMD_LINE_OPT_REASSEMBLE_NUM:
1288 ret = parse_decimal(optarg);
1289 if (ret < 0 || ret > UINT32_MAX) {
1290 printf("Invalid argument for \'%s\': %s\n",
1291 CMD_LINE_OPT_REASSEMBLE, optarg);
1292 print_usage(prgname);
1297 case CMD_LINE_OPT_MTU_NUM:
1298 ret = parse_decimal(optarg);
1299 if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1300 printf("Invalid argument for \'%s\': %s\n",
1301 CMD_LINE_OPT_MTU, optarg);
1302 print_usage(prgname);
1307 case CMD_LINE_OPT_FRAG_TTL_NUM:
1308 ret = parse_decimal(optarg);
1309 if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1310 printf("Invalid argument for \'%s\': %s\n",
1311 CMD_LINE_OPT_MTU, optarg);
1312 print_usage(prgname);
1317 case CMD_LINE_OPT_EVENT_VECTOR_NUM:
1318 em_conf = eh_conf->mode_params;
1319 em_conf->ext_params.event_vector = 1;
1321 case CMD_LINE_OPT_VECTOR_SIZE_NUM:
1322 ret = parse_decimal(optarg);
1324 if (ret > MAX_PKT_BURST_VEC) {
1325 printf("Invalid argument for \'%s\': %s\n",
1326 CMD_LINE_OPT_VECTOR_SIZE, optarg);
1327 print_usage(prgname);
1330 em_conf = eh_conf->mode_params;
1331 em_conf->ext_params.vector_size = ret;
1333 case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
1334 ret = parse_decimal(optarg);
1336 em_conf = eh_conf->mode_params;
1337 em_conf->vector_tmo_ns = ret;
1339 case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
1340 ret = parse_decimal(optarg);
1342 em_conf = eh_conf->mode_params;
1343 em_conf->vector_pool_sz = ret;
1345 case CMD_LINE_OPT_PER_PORT_POOL_NUM:
1349 print_usage(prgname);
1354 if (f_present == 0) {
1355 printf("Mandatory option \"-f\" not present\n");
1359 /* check do we need to enable multi-seg support */
1360 if (multi_seg_required()) {
1361 /* legacy mode doesn't support multi-seg */
1362 app_sa_prm.enable = 1;
1363 printf("frame buf size: %u, mtu: %u, "
1364 "number of reassemble entries: %u\n"
1365 "multi-segment support is required\n",
1366 frame_buf_size, mtu_size, frag_tbl_sz);
1369 print_app_sa_prm(&app_sa_prm);
1372 argv[optind-1] = prgname;
1375 optind = 1; /* reset getopt lib */
1380 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1382 char buf[RTE_ETHER_ADDR_FMT_SIZE];
1383 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1384 printf("%s%s", name, buf);
1388 * Update destination ethaddr for the port.
1391 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1393 if (port >= RTE_DIM(ethaddr_tbl))
1396 ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1400 /* Check the link status of all ports in up to 9s, and print them finally */
1402 check_all_ports_link_status(uint32_t port_mask)
1404 #define CHECK_INTERVAL 100 /* 100ms */
1405 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1407 uint8_t count, all_ports_up, print_flag = 0;
1408 struct rte_eth_link link;
1410 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1412 printf("\nChecking link status");
1414 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1416 RTE_ETH_FOREACH_DEV(portid) {
1417 if ((port_mask & (1 << portid)) == 0)
1419 memset(&link, 0, sizeof(link));
1420 ret = rte_eth_link_get_nowait(portid, &link);
1423 if (print_flag == 1)
1424 printf("Port %u link get failed: %s\n",
1425 portid, rte_strerror(-ret));
1428 /* print link status if flag set */
1429 if (print_flag == 1) {
1430 rte_eth_link_to_str(link_status_text,
1431 sizeof(link_status_text), &link);
1432 printf("Port %d %s\n", portid,
1436 /* clear all_ports_up flag if any link down */
1437 if (link.link_status == RTE_ETH_LINK_DOWN) {
1442 /* after finally printing all link status, get out */
1443 if (print_flag == 1)
1446 if (all_ports_up == 0) {
1449 rte_delay_ms(CHECK_INTERVAL);
1452 /* set the print_flag if all ports up or timeout */
1453 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1461 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1462 uint16_t qp, struct lcore_params *params,
1463 struct ipsec_ctx *ipsec_ctx,
1464 const struct rte_cryptodev_capabilities *cipher,
1465 const struct rte_cryptodev_capabilities *auth,
1466 const struct rte_cryptodev_capabilities *aead)
1470 struct cdev_key key = { 0 };
1472 key.lcore_id = params->lcore_id;
1474 key.cipher_algo = cipher->sym.cipher.algo;
1476 key.auth_algo = auth->sym.auth.algo;
1478 key.aead_algo = aead->sym.aead.algo;
1480 ret = rte_hash_lookup(map, &key);
1484 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1485 if (ipsec_ctx->tbl[i].id == cdev_id)
1488 if (i == ipsec_ctx->nb_qps) {
1489 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1490 printf("Maximum number of crypto devices assigned to "
1491 "a core, increase MAX_QP_PER_LCORE value\n");
1494 ipsec_ctx->tbl[i].id = cdev_id;
1495 ipsec_ctx->tbl[i].qp = qp;
1496 ipsec_ctx->nb_qps++;
1497 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1498 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1502 ret = rte_hash_add_key_data(map, &key, (void *)i);
1504 printf("Failed to insert cdev mapping for (lcore %u, "
1505 "cdev %u, qp %u), errno %d\n",
1506 key.lcore_id, ipsec_ctx->tbl[i].id,
1507 ipsec_ctx->tbl[i].qp, ret);
1515 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1516 uint16_t qp, struct lcore_params *params)
1519 const struct rte_cryptodev_capabilities *i, *j;
1520 struct rte_hash *map;
1521 struct lcore_conf *qconf;
1522 struct ipsec_ctx *ipsec_ctx;
1525 qconf = &lcore_conf[params->lcore_id];
1527 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1529 ipsec_ctx = &qconf->outbound;
1533 ipsec_ctx = &qconf->inbound;
1537 /* Required cryptodevs with operation chaining */
1538 if (!(dev_info->feature_flags &
1539 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1542 for (i = dev_info->capabilities;
1543 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1544 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1547 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1548 ret |= add_mapping(map, str, cdev_id, qp, params,
1549 ipsec_ctx, NULL, NULL, i);
1553 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1556 for (j = dev_info->capabilities;
1557 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1558 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1561 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1564 ret |= add_mapping(map, str, cdev_id, qp, params,
1565 ipsec_ctx, i, j, NULL);
1572 /* Check if the device is enabled by cryptodev_mask */
1574 check_cryptodev_mask(uint8_t cdev_id)
1576 if (enabled_cryptodev_mask & (1 << cdev_id))
1583 cryptodevs_init(uint16_t req_queue_num)
1585 struct rte_cryptodev_config dev_conf;
1586 struct rte_cryptodev_qp_conf qp_conf;
1587 uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
1589 struct rte_hash_parameters params = { 0 };
1591 const uint64_t mseg_flag = multi_seg_required() ?
1592 RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1594 params.entries = CDEV_MAP_ENTRIES;
1595 params.key_len = sizeof(struct cdev_key);
1596 params.hash_func = rte_jhash;
1597 params.hash_func_init_val = 0;
1598 params.socket_id = rte_socket_id();
1600 params.name = "cdev_map_in";
1601 cdev_map_in = rte_hash_create(¶ms);
1602 if (cdev_map_in == NULL)
1603 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1606 params.name = "cdev_map_out";
1607 cdev_map_out = rte_hash_create(¶ms);
1608 if (cdev_map_out == NULL)
1609 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1612 printf("lcore/cryptodev/qp mappings:\n");
1616 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1617 struct rte_cryptodev_info cdev_info;
1619 if (check_cryptodev_mask((uint8_t)cdev_id))
1622 rte_cryptodev_info_get(cdev_id, &cdev_info);
1624 if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1625 rte_exit(EXIT_FAILURE,
1626 "Device %hd does not support \'%s\' feature\n",
1628 rte_cryptodev_get_feature_name(mseg_flag));
1630 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1631 max_nb_qps = cdev_info.max_nb_queue_pairs;
1633 max_nb_qps = nb_lcore_params;
1637 while (qp < max_nb_qps && i < nb_lcore_params) {
1638 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1639 &lcore_params[idx]))
1642 idx = idx % nb_lcore_params;
1646 qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
1651 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1652 dev_conf.nb_queue_pairs = qp;
1653 dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1655 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1656 if (dev_max_sess != 0 &&
1657 dev_max_sess < get_nb_crypto_sessions())
1658 rte_exit(EXIT_FAILURE,
1659 "Device does not support at least %u "
1660 "sessions", get_nb_crypto_sessions());
1662 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1663 rte_panic("Failed to initialize cryptodev %u\n",
1666 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1667 qp_conf.mp_session =
1668 socket_ctx[dev_conf.socket_id].session_pool;
1669 qp_conf.mp_session_private =
1670 socket_ctx[dev_conf.socket_id].session_priv_pool;
1671 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1672 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1673 &qp_conf, dev_conf.socket_id))
1674 rte_panic("Failed to setup queue %u for "
1675 "cdev_id %u\n", 0, cdev_id);
1677 if (rte_cryptodev_start(cdev_id))
1678 rte_panic("Failed to start cryptodev %u\n",
1684 return total_nb_qps;
1688 check_ptype(int portid)
1690 int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
1694 mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
1695 RTE_PTYPE_TUNNEL_MASK);
1697 nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
1701 uint32_t ptypes[nb_ptypes];
1703 nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
1704 for (i = 0; i < nb_ptypes; ++i) {
1705 if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
1707 if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
1709 if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
1711 if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
1716 printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
1719 printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
1722 printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
1724 if (tunnel_esp == 0)
1725 printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
1727 if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
1735 parse_ptype(struct rte_mbuf *m)
1737 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1738 const struct rte_ipv4_hdr *iph4;
1739 const struct rte_ipv6_hdr *iph6;
1740 const struct rte_ether_hdr *eth;
1741 const struct rte_udp_hdr *udp;
1742 uint16_t nat_port, ether_type;
1748 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1749 ether_type = eth->ether_type;
1751 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
1752 iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
1753 l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
1754 RTE_IPV4_IHL_MULTIPLIER);
1756 if (l3len == sizeof(struct rte_ipv4_hdr))
1757 packet_type |= RTE_PTYPE_L3_IPV4;
1759 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
1761 next_proto = iph4->next_proto_id;
1762 p = (const uint8_t *)iph4;
1763 } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
1764 iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
1765 l3len = sizeof(struct ip6_hdr);
1767 /* determine l3 header size up to ESP extension */
1768 next_proto = iph6->proto;
1769 p = (const uint8_t *)iph6;
1770 while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
1771 (next_proto = rte_ipv6_get_next_ext(p + l3len,
1772 next_proto, &ext_len)) >= 0)
1775 /* Skip IPv6 header exceeds first segment length */
1776 if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
1779 if (l3len == sizeof(struct ip6_hdr))
1780 packet_type |= RTE_PTYPE_L3_IPV6;
1782 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
1785 switch (next_proto) {
1787 packet_type |= RTE_PTYPE_TUNNEL_ESP;
1790 if (app_sa_prm.udp_encap == 1) {
1791 udp = (const struct rte_udp_hdr *)(p + l3len);
1792 nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
1793 if (udp->src_port == nat_port ||
1794 udp->dst_port == nat_port)
1796 MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
1803 m->packet_type = packet_type;
1807 parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
1808 struct rte_mbuf *pkts[], uint16_t nb_pkts,
1809 uint16_t max_pkts __rte_unused,
1810 void *user_param __rte_unused)
1814 if (unlikely(nb_pkts == 0))
1817 rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *));
1818 for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
1819 rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
1820 struct ether_hdr *));
1821 parse_ptype(pkts[i]);
1823 parse_ptype(pkts[i]);
1829 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
1831 struct rte_eth_dev_info dev_info;
1832 struct rte_eth_txconf *txconf;
1833 uint16_t nb_tx_queue, nb_rx_queue;
1834 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1835 int32_t ret, socket_id;
1836 struct lcore_conf *qconf;
1837 struct rte_ether_addr ethaddr;
1838 struct rte_eth_conf local_port_conf = port_conf;
1839 int ptype_supported;
1841 ret = rte_eth_dev_info_get(portid, &dev_info);
1843 rte_exit(EXIT_FAILURE,
1844 "Error during getting device (port %u) info: %s\n",
1845 portid, strerror(-ret));
1847 /* limit allowed HW offloads, as user requested */
1848 dev_info.rx_offload_capa &= dev_rx_offload;
1849 dev_info.tx_offload_capa &= dev_tx_offload;
1851 printf("Configuring device port %u:\n", portid);
1853 ret = rte_eth_macaddr_get(portid, ðaddr);
1855 rte_exit(EXIT_FAILURE,
1856 "Error getting MAC address (port %u): %s\n",
1857 portid, rte_strerror(-ret));
1859 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr);
1860 print_ethaddr("Address: ", ðaddr);
1863 nb_rx_queue = get_port_nb_rx_queues(portid);
1864 nb_tx_queue = nb_lcores;
1866 if (nb_rx_queue > dev_info.max_rx_queues)
1867 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1868 "(max rx queue is %u)\n",
1869 nb_rx_queue, dev_info.max_rx_queues);
1871 if (nb_tx_queue > dev_info.max_tx_queues)
1872 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1873 "(max tx queue is %u)\n",
1874 nb_tx_queue, dev_info.max_tx_queues);
1876 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1877 nb_rx_queue, nb_tx_queue);
1879 local_port_conf.rxmode.mtu = mtu_size;
1881 if (multi_seg_required()) {
1882 local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
1883 local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1886 local_port_conf.rxmode.offloads |= req_rx_offloads;
1887 local_port_conf.txmode.offloads |= req_tx_offloads;
1889 /* Check that all required capabilities are supported */
1890 if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1891 local_port_conf.rxmode.offloads)
1892 rte_exit(EXIT_FAILURE,
1893 "Error: port %u required RX offloads: 0x%" PRIx64
1894 ", available RX offloads: 0x%" PRIx64 "\n",
1895 portid, local_port_conf.rxmode.offloads,
1896 dev_info.rx_offload_capa);
1898 if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1899 local_port_conf.txmode.offloads)
1900 rte_exit(EXIT_FAILURE,
1901 "Error: port %u required TX offloads: 0x%" PRIx64
1902 ", available TX offloads: 0x%" PRIx64 "\n",
1903 portid, local_port_conf.txmode.offloads,
1904 dev_info.tx_offload_capa);
1906 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
1907 local_port_conf.txmode.offloads |=
1908 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1910 printf("port %u configuring rx_offloads=0x%" PRIx64
1911 ", tx_offloads=0x%" PRIx64 "\n",
1912 portid, local_port_conf.rxmode.offloads,
1913 local_port_conf.txmode.offloads);
1915 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1916 dev_info.flow_type_rss_offloads;
1917 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1918 port_conf.rx_adv_conf.rss_conf.rss_hf) {
1919 printf("Port %u modified RSS hash function based on hardware support,"
1920 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
1922 port_conf.rx_adv_conf.rss_conf.rss_hf,
1923 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1926 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1929 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1930 "err=%d, port=%d\n", ret, portid);
1932 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
1934 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
1935 "err=%d, port=%d\n", ret, portid);
1937 /* Check if required ptypes are supported */
1938 ptype_supported = check_ptype(portid);
1939 if (!ptype_supported)
1940 printf("Port %d: softly parse packet type info\n", portid);
1942 /* init one TX queue per lcore */
1944 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1945 if (rte_lcore_is_enabled(lcore_id) == 0)
1949 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1954 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1956 txconf = &dev_info.default_txconf;
1957 txconf->offloads = local_port_conf.txmode.offloads;
1959 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
1962 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1963 "err=%d, port=%d\n", ret, portid);
1965 qconf = &lcore_conf[lcore_id];
1966 qconf->tx_queue_id[portid] = tx_queueid;
1968 /* Pre-populate pkt offloads based on capabilities */
1969 qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
1970 qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
1971 if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
1972 qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
1976 /* init RX queues */
1977 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
1978 struct rte_eth_rxconf rxq_conf;
1979 struct rte_mempool *pool;
1981 if (portid != qconf->rx_queue_list[queue].port_id)
1984 rx_queueid = qconf->rx_queue_list[queue].queue_id;
1986 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
1989 rxq_conf = dev_info.default_rxconf;
1990 rxq_conf.offloads = local_port_conf.rxmode.offloads;
1993 pool = socket_ctx[socket_id].mbuf_pool[portid];
1995 pool = socket_ctx[socket_id].mbuf_pool[0];
1997 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
1998 nb_rxd, socket_id, &rxq_conf, pool);
2000 rte_exit(EXIT_FAILURE,
2001 "rte_eth_rx_queue_setup: err=%d, "
2002 "port=%d\n", ret, portid);
2004 /* Register Rx callback if ptypes are not supported */
2005 if (!ptype_supported &&
2006 !rte_eth_add_rx_callback(portid, queue,
2007 parse_ptype_cb, NULL)) {
2008 printf("Failed to add rx callback: port=%d, "
2009 "queue=%d\n", portid, queue);
2019 max_session_size(void)
2023 int16_t cdev_id, port_id, n;
2026 n = rte_cryptodev_count();
2027 for (cdev_id = 0; cdev_id != n; cdev_id++) {
2028 sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2032 * If crypto device is security capable, need to check the
2033 * size of security session as well.
2036 /* Get security context of the crypto device */
2037 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2038 if (sec_ctx == NULL)
2041 /* Get size of security session */
2042 sz = rte_security_session_get_size(sec_ctx);
2047 RTE_ETH_FOREACH_DEV(port_id) {
2048 if ((enabled_port_mask & (1 << port_id)) == 0)
2051 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2052 if (sec_ctx == NULL)
2055 sz = rte_security_session_get_size(sec_ctx);
2064 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2066 char mp_name[RTE_MEMPOOL_NAMESIZE];
2067 struct rte_mempool *sess_mp;
2070 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2071 "sess_mp_%u", socket_id);
2072 nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
2074 nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
2075 CDEV_MP_CACHE_MULTIPLIER);
2076 sess_mp = rte_cryptodev_sym_session_pool_create(
2077 mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
2079 ctx->session_pool = sess_mp;
2081 if (ctx->session_pool == NULL)
2082 rte_exit(EXIT_FAILURE,
2083 "Cannot init session pool on socket %d\n", socket_id);
2085 printf("Allocated session pool on socket %d\n", socket_id);
2089 session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
2092 char mp_name[RTE_MEMPOOL_NAMESIZE];
2093 struct rte_mempool *sess_mp;
2096 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2097 "sess_mp_priv_%u", socket_id);
2098 nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
2100 nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
2101 CDEV_MP_CACHE_MULTIPLIER);
2102 sess_mp = rte_mempool_create(mp_name,
2106 0, NULL, NULL, NULL,
2109 ctx->session_priv_pool = sess_mp;
2111 if (ctx->session_priv_pool == NULL)
2112 rte_exit(EXIT_FAILURE,
2113 "Cannot init session priv pool on socket %d\n",
2116 printf("Allocated session priv pool on socket %d\n",
2121 pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
2128 /* mbuf_pool is initialised by the pool_init() function*/
2129 if (socket_ctx[socket_id].mbuf_pool[portid])
2132 snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
2133 ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
2135 ipsec_metadata_size(),
2140 * if multi-segment support is enabled, then create a pool
2141 * for indirect mbufs. This is not per-port but global.
2143 ms = multi_seg_required();
2144 if (ms != 0 && !ctx->mbuf_pool_indir) {
2145 snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2146 ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2147 MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2150 if (ctx->mbuf_pool[portid] == NULL ||
2151 (ms != 0 && ctx->mbuf_pool_indir == NULL))
2152 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2155 printf("Allocated mbuf pool on socket %d\n", socket_id);
2159 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2161 struct ipsec_sa *sa;
2163 /* For inline protocol processing, the metadata in the event will
2164 * uniquely identify the security session which raised the event.
2165 * Application would then need the userdata it had registered with the
2166 * security session to process the event.
2169 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2172 /* userdata could not be retrieved */
2176 /* Sequence number over flow. SA need to be re-established */
2182 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2183 void *param, void *ret_param)
2186 struct rte_eth_event_ipsec_desc *event_desc = NULL;
2187 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2188 rte_eth_dev_get_sec_ctx(port_id);
2190 RTE_SET_USED(param);
2192 if (type != RTE_ETH_EVENT_IPSEC)
2195 event_desc = ret_param;
2196 if (event_desc == NULL) {
2197 printf("Event descriptor not set\n");
2201 md = event_desc->metadata;
2203 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2204 return inline_ipsec_event_esn_overflow(ctx, md);
2205 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2206 printf("Invalid IPsec event reported\n");
2214 ethdev_reset_event_callback(uint16_t port_id,
2215 enum rte_eth_event_type type,
2216 void *param __rte_unused, void *ret_param __rte_unused)
2218 printf("Reset Event on port id %d type %d\n", port_id, type);
2219 printf("Force quit application");
2225 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2226 struct rte_mbuf *pkt[], uint16_t nb_pkts,
2227 __rte_unused uint16_t max_pkts, void *user_param)
2231 struct lcore_conf *lc;
2232 struct rte_mbuf *mb;
2233 struct rte_ether_hdr *eth;
2239 for (i = 0; i != nb_pkts; i++) {
2242 eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2243 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2245 struct rte_ipv4_hdr *iph;
2247 iph = (struct rte_ipv4_hdr *)(eth + 1);
2248 if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2250 mb->l2_len = sizeof(*eth);
2251 mb->l3_len = sizeof(*iph);
2252 tm = (tm != 0) ? tm : rte_rdtsc();
2253 mb = rte_ipv4_frag_reassemble_packet(
2254 lc->frag.tbl, &lc->frag.dr,
2258 /* fix ip cksum after reassemble. */
2259 iph = rte_pktmbuf_mtod_offset(mb,
2260 struct rte_ipv4_hdr *,
2262 iph->hdr_checksum = 0;
2263 iph->hdr_checksum = rte_ipv4_cksum(iph);
2266 } else if (eth->ether_type ==
2267 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2269 struct rte_ipv6_hdr *iph;
2270 struct rte_ipv6_fragment_ext *fh;
2272 iph = (struct rte_ipv6_hdr *)(eth + 1);
2273 fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2275 mb->l2_len = sizeof(*eth);
2276 mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2278 tm = (tm != 0) ? tm : rte_rdtsc();
2279 mb = rte_ipv6_frag_reassemble_packet(
2280 lc->frag.tbl, &lc->frag.dr,
2283 /* fix l3_len after reassemble. */
2284 mb->l3_len = mb->l3_len - sizeof(*fh);
2292 /* some fragments were encountered, drain death row */
2294 rte_ip_frag_free_death_row(&lc->frag.dr, 0);
2301 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2305 uint64_t frag_cycles;
2306 const struct lcore_rx_queue *rxq;
2307 const struct rte_eth_rxtx_callback *cb;
2309 /* create fragment table */
2310 sid = rte_lcore_to_socket_id(cid);
2311 frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
2312 NS_PER_S * frag_ttl_ns;
2314 lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2315 FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2316 if (lc->frag.tbl == NULL) {
2317 printf("%s(%u): failed to create fragment table of size: %u, "
2319 __func__, cid, frag_tbl_sz, rte_errno);
2323 /* setup reassemble RX callbacks for all queues */
2324 for (i = 0; i != lc->nb_rx_queue; i++) {
2326 rxq = lc->rx_queue_list + i;
2327 cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2330 printf("%s(%u): failed to install RX callback for "
2331 "portid=%u, queueid=%u, error code: %d\n",
2333 rxq->port_id, rxq->queue_id, rte_errno);
2342 reassemble_init(void)
2348 for (i = 0; i != nb_lcore_params; i++) {
2349 lc = lcore_params[i].lcore_id;
2350 rc = reassemble_lcore_init(lcore_conf + lc, lc);
2359 create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
2361 struct rte_flow_action action[2];
2362 struct rte_flow_item pattern[2];
2363 struct rte_flow_attr attr = {0};
2364 struct rte_flow_error err;
2365 struct rte_flow *flow;
2368 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
2371 /* Add the default rte_flow to enable SECURITY for all ESP packets */
2373 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
2374 pattern[0].spec = NULL;
2375 pattern[0].mask = NULL;
2376 pattern[0].last = NULL;
2377 pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
2379 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
2380 action[0].conf = NULL;
2381 action[1].type = RTE_FLOW_ACTION_TYPE_END;
2382 action[1].conf = NULL;
2386 ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
2390 flow = rte_flow_create(port_id, &attr, pattern, action, &err);
2394 flow_info_tbl[port_id].rx_def_flow = flow;
2395 RTE_LOG(INFO, IPSEC,
2396 "Created default flow enabling SECURITY for all ESP traffic on port %d\n",
2401 signal_handler(int signum)
2403 if (signum == SIGINT || signum == SIGTERM) {
2404 printf("\n\nSignal %d received, preparing to exit...\n",
2411 ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
2413 struct rte_ipsec_session *ips;
2419 for (i = 0; i < nb_sa; i++) {
2420 ips = ipsec_get_primary_session(&sa[i]);
2421 if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
2422 rte_exit(EXIT_FAILURE, "Event mode supports only "
2423 "inline protocol sessions\n");
2429 check_event_mode_params(struct eh_conf *eh_conf)
2431 struct eventmode_conf *em_conf = NULL;
2432 struct lcore_params *params;
2435 if (!eh_conf || !eh_conf->mode_params)
2438 /* Get eventmode conf */
2439 em_conf = eh_conf->mode_params;
2441 if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
2442 em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
2443 printf("error: option --event-schedule-type applies only to "
2448 if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
2451 /* Set schedule type to ORDERED if it wasn't explicitly set by user */
2452 if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
2453 em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
2456 * Event mode currently supports only inline protocol sessions.
2457 * If there are other types of sessions configured then exit with
2460 ev_mode_sess_verify(sa_in, nb_sa_in);
2461 ev_mode_sess_verify(sa_out, nb_sa_out);
2464 /* Option --config does not apply to event mode */
2465 if (nb_lcore_params > 0) {
2466 printf("error: option --config applies only to poll mode\n");
2471 * In order to use the same port_init routine for both poll and event
2472 * modes initialize lcore_params with one queue for each eth port
2474 lcore_params = lcore_params_array;
2475 RTE_ETH_FOREACH_DEV(portid) {
2476 if ((enabled_port_mask & (1 << portid)) == 0)
2479 params = &lcore_params[nb_lcore_params++];
2480 params->port_id = portid;
2481 params->queue_id = 0;
2482 params->lcore_id = rte_get_next_lcore(0, 0, 1);
2489 one_session_free(struct rte_ipsec_session *ips)
2493 if (ips->type == RTE_SECURITY_ACTION_TYPE_NONE ||
2494 ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
2495 /* Session has not been created */
2496 if (ips->crypto.ses == NULL)
2499 ret = rte_cryptodev_sym_session_clear(ips->crypto.dev_id,
2504 ret = rte_cryptodev_sym_session_free(ips->crypto.ses);
2506 /* Session has not been created */
2507 if (ips->security.ctx == NULL || ips->security.ses == NULL)
2510 ret = rte_security_session_destroy(ips->security.ctx,
2518 sessions_free(struct sa_ctx *sa_ctx)
2520 struct rte_ipsec_session *ips;
2521 struct ipsec_sa *sa;
2528 for (i = 0; i < sa_ctx->nb_sa; i++) {
2530 sa = &sa_ctx->sa[i];
2534 ips = ipsec_get_primary_session(sa);
2535 ret = one_session_free(ips);
2537 RTE_LOG(ERR, IPSEC, "Failed to destroy security "
2538 "session type %d, spi %d\n",
2539 ips->type, sa->spi);
2544 calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
2547 return RTE_MAX((nb_rxq * nb_rxd +
2548 nb_ports * nb_lcores * MAX_PKT_BURST +
2549 nb_ports * nb_txq * nb_txd +
2550 nb_lcores * MEMPOOL_CACHE_SIZE +
2551 nb_crypto_qp * CDEV_QUEUE_DESC +
2552 nb_lcores * frag_tbl_sz *
2553 FRAG_TBL_BUCKET_ENTRIES),
2559 handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
2560 const char *params, struct rte_tel_data *data)
2562 uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
2563 unsigned int coreid;
2565 rte_tel_data_start_dict(data);
2568 coreid = (uint32_t)atoi(params);
2569 if (rte_lcore_is_enabled(coreid) == 0)
2572 total_pkts_dropped = core_statistics[coreid].dropped;
2573 total_pkts_tx = core_statistics[coreid].tx;
2574 total_pkts_rx = core_statistics[coreid].rx;
2577 for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
2579 /* skip disabled cores */
2580 if (rte_lcore_is_enabled(coreid) == 0)
2583 total_pkts_dropped += core_statistics[coreid].dropped;
2584 total_pkts_tx += core_statistics[coreid].tx;
2585 total_pkts_rx += core_statistics[coreid].rx;
2589 /* add telemetry key/values pairs */
2590 rte_tel_data_add_dict_u64(data, "packets received",
2593 rte_tel_data_add_dict_u64(data, "packets transmitted",
2596 rte_tel_data_add_dict_u64(data, "packets dropped",
2597 total_pkts_dropped);
2604 update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
2606 struct ipsec_core_statistics *lcore_stats;
2608 /* skip disabled cores */
2609 if (rte_lcore_is_enabled(coreid) == 0)
2612 lcore_stats = &core_statistics[coreid];
2614 total->rx = lcore_stats->rx;
2615 total->dropped = lcore_stats->dropped;
2616 total->tx = lcore_stats->tx;
2618 /* outbound stats */
2619 total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
2620 total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
2621 total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;
2623 total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
2624 total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
2625 total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;
2627 total->outbound.sad.miss += lcore_stats->outbound.sad.miss;
2630 total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
2631 total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
2632 total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;
2634 total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
2635 total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
2636 total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;
2638 total->inbound.sad.miss += lcore_stats->inbound.sad.miss;
2642 total->lpm4.miss += lcore_stats->lpm4.miss;
2643 total->lpm6.miss += lcore_stats->lpm6.miss;
2647 update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
2649 memset(total, 0, sizeof(*total));
2651 if (coreid != UINT32_MAX) {
2652 update_lcore_statistics(total, coreid);
2654 for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
2655 update_lcore_statistics(total, coreid);
2660 handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
2661 const char *params, struct rte_tel_data *data)
2663 struct ipsec_core_statistics total_stats;
2665 struct rte_tel_data *spd4_data = rte_tel_data_alloc();
2666 struct rte_tel_data *spd6_data = rte_tel_data_alloc();
2667 struct rte_tel_data *sad_data = rte_tel_data_alloc();
2668 unsigned int coreid = UINT32_MAX;
2671 /* verify allocated telemetry data structures */
2672 if (!spd4_data || !spd6_data || !sad_data) {
2677 /* initialize telemetry data structs as dicts */
2678 rte_tel_data_start_dict(data);
2680 rte_tel_data_start_dict(spd4_data);
2681 rte_tel_data_start_dict(spd6_data);
2682 rte_tel_data_start_dict(sad_data);
2685 coreid = (uint32_t)atoi(params);
2686 if (rte_lcore_is_enabled(coreid) == 0) {
2692 update_statistics(&total_stats, coreid);
2694 /* add spd 4 telemetry key/values pairs */
2696 rte_tel_data_add_dict_u64(spd4_data, "protect",
2697 total_stats.outbound.spd4.protect);
2698 rte_tel_data_add_dict_u64(spd4_data, "bypass",
2699 total_stats.outbound.spd4.bypass);
2700 rte_tel_data_add_dict_u64(spd4_data, "discard",
2701 total_stats.outbound.spd4.discard);
2703 rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
2705 /* add spd 6 telemetry key/values pairs */
2707 rte_tel_data_add_dict_u64(spd6_data, "protect",
2708 total_stats.outbound.spd6.protect);
2709 rte_tel_data_add_dict_u64(spd6_data, "bypass",
2710 total_stats.outbound.spd6.bypass);
2711 rte_tel_data_add_dict_u64(spd6_data, "discard",
2712 total_stats.outbound.spd6.discard);
2714 rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
2716 /* add sad telemetry key/values pairs */
2718 rte_tel_data_add_dict_u64(sad_data, "miss",
2719 total_stats.outbound.sad.miss);
2721 rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
2725 rte_tel_data_free(spd4_data);
2726 rte_tel_data_free(spd6_data);
2727 rte_tel_data_free(sad_data);
2733 handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
2734 const char *params, struct rte_tel_data *data)
2736 struct ipsec_core_statistics total_stats;
2738 struct rte_tel_data *spd4_data = rte_tel_data_alloc();
2739 struct rte_tel_data *spd6_data = rte_tel_data_alloc();
2740 struct rte_tel_data *sad_data = rte_tel_data_alloc();
2741 unsigned int coreid = UINT32_MAX;
2744 /* verify allocated telemetry data structures */
2745 if (!spd4_data || !spd6_data || !sad_data) {
2750 /* initialize telemetry data structs as dicts */
2751 rte_tel_data_start_dict(data);
2752 rte_tel_data_start_dict(spd4_data);
2753 rte_tel_data_start_dict(spd6_data);
2754 rte_tel_data_start_dict(sad_data);
2756 /* add children dicts to parent dict */
2759 coreid = (uint32_t)atoi(params);
2760 if (rte_lcore_is_enabled(coreid) == 0) {
2766 update_statistics(&total_stats, coreid);
2768 /* add sad telemetry key/values pairs */
2770 rte_tel_data_add_dict_u64(sad_data, "miss",
2771 total_stats.inbound.sad.miss);
2773 rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
2775 /* add spd 4 telemetry key/values pairs */
2777 rte_tel_data_add_dict_u64(spd4_data, "protect",
2778 total_stats.inbound.spd4.protect);
2779 rte_tel_data_add_dict_u64(spd4_data, "bypass",
2780 total_stats.inbound.spd4.bypass);
2781 rte_tel_data_add_dict_u64(spd4_data, "discard",
2782 total_stats.inbound.spd4.discard);
2784 rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
2786 /* add spd 6 telemetry key/values pairs */
2788 rte_tel_data_add_dict_u64(spd6_data, "protect",
2789 total_stats.inbound.spd6.protect);
2790 rte_tel_data_add_dict_u64(spd6_data, "bypass",
2791 total_stats.inbound.spd6.bypass);
2792 rte_tel_data_add_dict_u64(spd6_data, "discard",
2793 total_stats.inbound.spd6.discard);
2795 rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
2799 rte_tel_data_free(spd4_data);
2800 rte_tel_data_free(spd6_data);
2801 rte_tel_data_free(sad_data);
2807 handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
2808 const char *params, struct rte_tel_data *data)
2810 struct ipsec_core_statistics total_stats;
2812 struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
2813 struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
2814 unsigned int coreid = UINT32_MAX;
2817 /* verify allocated telemetry data structures */
2818 if (!lpm4_data || !lpm6_data) {
2823 /* initialize telemetry data structs as dicts */
2824 rte_tel_data_start_dict(data);
2825 rte_tel_data_start_dict(lpm4_data);
2826 rte_tel_data_start_dict(lpm6_data);
2830 coreid = (uint32_t)atoi(params);
2831 if (rte_lcore_is_enabled(coreid) == 0) {
2837 update_statistics(&total_stats, coreid);
2839 /* add lpm 4 telemetry key/values pairs */
2840 rte_tel_data_add_dict_u64(lpm4_data, "miss",
2841 total_stats.lpm4.miss);
2843 rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);
2845 /* add lpm 6 telemetry key/values pairs */
2846 rte_tel_data_add_dict_u64(lpm6_data, "miss",
2847 total_stats.lpm6.miss);
2849 rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
2853 rte_tel_data_free(lpm4_data);
2854 rte_tel_data_free(lpm6_data);
2860 ipsec_secgw_telemetry_init(void)
2862 rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
2863 handle_telemetry_cmd_ipsec_secgw_stats,
2864 "Returns global stats. "
2865 "Optional Parameters: int <logical core id>");
2867 rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
2868 handle_telemetry_cmd_ipsec_secgw_stats_outbound,
2869 "Returns outbound global stats. "
2870 "Optional Parameters: int <logical core id>");
2872 rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
2873 handle_telemetry_cmd_ipsec_secgw_stats_inbound,
2874 "Returns inbound global stats. "
2875 "Optional Parameters: int <logical core id>");
2877 rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
2878 handle_telemetry_cmd_ipsec_secgw_stats_routing,
2879 "Returns routing stats. "
2880 "Optional Parameters: int <logical core id>");
2885 main(int32_t argc, char **argv)
2888 uint32_t lcore_id, nb_txq, nb_rxq = 0;
2892 uint16_t portid, nb_crypto_qp, nb_ports = 0;
2893 uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
2894 uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
2895 struct eh_conf *eh_conf = NULL;
2898 nb_bufs_in_pool = 0;
2901 ret = rte_eal_init(argc, argv);
2903 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2908 signal(SIGINT, signal_handler);
2909 signal(SIGTERM, signal_handler);
2911 /* initialize event helper configuration */
2912 eh_conf = eh_conf_init();
2913 if (eh_conf == NULL)
2914 rte_exit(EXIT_FAILURE, "Failed to init event helper config");
2916 /* parse application arguments (after the EAL ones) */
2917 ret = parse_args(argc, argv, eh_conf);
2919 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2921 ipsec_secgw_telemetry_init();
2923 /* parse configuration file */
2924 if (parse_cfg_file(cfgfile) < 0) {
2925 printf("parsing file \"%s\" failed\n",
2927 print_usage(argv[0]);
2931 if ((unprotected_port_mask & enabled_port_mask) !=
2932 unprotected_port_mask)
2933 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2934 unprotected_port_mask);
2936 if (unprotected_port_mask && !nb_sa_in)
2937 rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
2939 if (check_poll_mode_params(eh_conf) < 0)
2940 rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
2942 if (check_event_mode_params(eh_conf) < 0)
2943 rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");
2945 ret = init_lcore_rx_queues();
2947 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2949 nb_lcores = rte_lcore_count();
2951 sess_sz = max_session_size();
2954 * In event mode request minimum number of crypto queues
2955 * to be reserved equal to number of ports.
2957 if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
2958 nb_crypto_qp = rte_eth_dev_count_avail();
2962 nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
2964 if (nb_bufs_in_pool == 0) {
2965 RTE_ETH_FOREACH_DEV(portid) {
2966 if ((enabled_port_mask & (1 << portid)) == 0)
2969 nb_rxq += get_port_nb_rx_queues(portid);
2974 nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
2978 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2979 if (rte_lcore_is_enabled(lcore_id) == 0)
2983 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2987 if (per_port_pool) {
2988 RTE_ETH_FOREACH_DEV(portid) {
2989 if ((enabled_port_mask & (1 << portid)) == 0)
2992 pool_init(&socket_ctx[socket_id], socket_id,
2993 portid, nb_bufs_in_pool);
2996 pool_init(&socket_ctx[socket_id], socket_id, 0,
3000 if (socket_ctx[socket_id].session_pool)
3003 session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
3004 session_priv_pool_init(&socket_ctx[socket_id], socket_id,
3007 printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
3009 RTE_ETH_FOREACH_DEV(portid) {
3010 if ((enabled_port_mask & (1 << portid)) == 0)
3013 sa_check_offloads(portid, &req_rx_offloads[portid],
3014 &req_tx_offloads[portid]);
3015 port_init(portid, req_rx_offloads[portid],
3016 req_tx_offloads[portid]);
3020 * Set the enabled port mask in helper config for use by helper
3021 * sub-system. This will be used while initializing devices using
3022 * helper sub-system.
3024 eh_conf->eth_portmask = enabled_port_mask;
3026 /* Initialize eventmode components */
3027 ret = eh_devs_init(eh_conf);
3029 rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
3032 RTE_ETH_FOREACH_DEV(portid) {
3033 if ((enabled_port_mask & (1 << portid)) == 0)
3036 ret = rte_eth_dev_start(portid);
3038 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
3039 "err=%d, port=%d\n", ret, portid);
3041 /* Create flow after starting the device */
3042 create_default_ipsec_flow(portid, req_rx_offloads[portid]);
3045 * If enabled, put device in promiscuous mode.
3046 * This allows IO forwarding mode to forward packets
3047 * to itself through 2 cross-connected ports of the
3050 if (promiscuous_on) {
3051 ret = rte_eth_promiscuous_enable(portid);
3053 rte_exit(EXIT_FAILURE,
3054 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
3055 rte_strerror(-ret), portid);
3058 rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
3059 ethdev_reset_event_callback, NULL);
3061 rte_eth_dev_callback_register(portid,
3062 RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
3065 /* fragment reassemble is enabled */
3066 if (frag_tbl_sz != 0) {
3067 ret = reassemble_init();
3069 rte_exit(EXIT_FAILURE, "failed at reassemble init");
3072 /* Replicate each context per socket */
3073 for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3074 socket_id = rte_socket_id_by_idx(i);
3075 if ((socket_ctx[socket_id].session_pool != NULL) &&
3076 (socket_ctx[socket_id].sa_in == NULL) &&
3077 (socket_ctx[socket_id].sa_out == NULL)) {
3078 sa_init(&socket_ctx[socket_id], socket_id);
3079 sp4_init(&socket_ctx[socket_id], socket_id);
3080 sp6_init(&socket_ctx[socket_id], socket_id);
3081 rt_init(&socket_ctx[socket_id], socket_id);
3087 /* Get security context if available and only if dynamic field is
3088 * registered for fast path access.
3090 if (!rte_security_dynfield_is_registered())
3093 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
3094 for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
3095 portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
3096 lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
3097 rte_eth_dev_get_sec_ctx(portid);
3102 check_all_ports_link_status(enabled_port_mask);
3104 if (stats_interval > 0)
3105 rte_eal_alarm_set(stats_interval * US_PER_S,
3106 print_stats_cb, NULL);
3108 RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
3110 /* launch per-lcore init on every lcore */
3111 rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
3112 RTE_LCORE_FOREACH_WORKER(lcore_id) {
3113 if (rte_eal_wait_lcore(lcore_id) < 0)
3117 /* Uninitialize eventmode components */
3118 ret = eh_devs_uninit(eh_conf);
3120 rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);
3122 /* Free eventmode configuration memory */
3123 eh_conf_uninit(eh_conf);
3125 /* Destroy inbound and outbound sessions */
3126 for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
3127 socket_id = rte_socket_id_by_idx(i);
3128 sessions_free(socket_ctx[socket_id].sa_in);
3129 sessions_free(socket_ctx[socket_id].sa_out);
3132 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
3133 printf("Closing cryptodev %d...", cdev_id);
3134 rte_cryptodev_stop(cdev_id);
3135 rte_cryptodev_close(cdev_id);
3139 RTE_ETH_FOREACH_DEV(portid) {
3140 if ((enabled_port_mask & (1 << portid)) == 0)
3143 printf("Closing port %d...", portid);
3144 if (flow_info_tbl[portid].rx_def_flow) {
3145 struct rte_flow_error err;
3147 ret = rte_flow_destroy(portid,
3148 flow_info_tbl[portid].rx_def_flow, &err);
3150 RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
3151 " for port %u, err msg: %s\n", portid,
3154 ret = rte_eth_dev_stop(portid);
3157 "rte_eth_dev_stop: err=%s, port=%u\n",
3158 rte_strerror(-ret), portid);
3160 rte_eth_dev_close(portid);
3164 /* clean up the EAL */