+ if (type != RTE_ETH_EVENT_IPSEC)
+ return -1;
+
+ event_desc = ret_param;
+ if (event_desc == NULL) {
+ printf("Event descriptor not set\n");
+ return -1;
+ }
+
+ md = event_desc->metadata;
+
+ if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
+ return inline_ipsec_event_esn_overflow(ctx, md);
+ else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
+ printf("Invalid IPsec event reported\n");
+ return -1;
+ }
+
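+ /* no action needed for other valid IPsec event subtypes */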
+ return -1;
+}
+
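+/* Ethdev reset event callback: log the event and ask the main loop to quit. */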
+static int
+ethdev_reset_event_callback(uint16_t port_id,
+ enum rte_eth_event_type type,
+ void *param __rte_unused, void *ret_param __rte_unused)
+{
+ printf("Reset Event on port id %d type %d\n", port_id, type);
+ printf("Force quit application");
+ force_quit = true;
+ return 0;
+}
+
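+/* RX callback: reassemble fragmented IPv4/IPv6 packets before further processing. */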
+static uint16_t
+rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ uint64_t tm;
+ uint32_t i, k;
+ struct lcore_conf *lc;
+ struct rte_mbuf *mb;
+ struct rte_ether_hdr *eth;
+
+ lc = user_param;
+ k = 0;
+ tm = 0;
+
+ for (i = 0; i != nb_pkts; i++) {
+
+ mb = pkt[i];
+ eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ struct rte_ipv4_hdr *iph;
+
+ iph = (struct rte_ipv4_hdr *)(eth + 1);
+ if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
+
+ mb->l2_len = sizeof(*eth);
+ mb->l3_len = sizeof(*iph);
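+ /* sample the TSC once per burst, on the first fragment seen */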
+ tm = (tm != 0) ? tm : rte_rdtsc();
+ mb = rte_ipv4_frag_reassemble_packet(
+ lc->frag.tbl, &lc->frag.dr,
+ mb, tm, iph);
+
+ if (mb != NULL) {
+ /* fix IP checksum after reassembly. */
+ iph = rte_pktmbuf_mtod_offset(mb,
+ struct rte_ipv4_hdr *,
+ mb->l2_len);
+ iph->hdr_checksum = 0;
+ iph->hdr_checksum = rte_ipv4_cksum(iph);
+ }
+ }
+ } else if (eth->ether_type ==
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+
+ struct rte_ipv6_hdr *iph;
+ struct rte_ipv6_fragment_ext *fh;
+
+ iph = (struct rte_ipv6_hdr *)(eth + 1);
+ fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
+ if (fh != NULL) {
+ mb->l2_len = sizeof(*eth);
+ mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
+ sizeof(*fh);
+ tm = (tm != 0) ? tm : rte_rdtsc();
+ mb = rte_ipv6_frag_reassemble_packet(
+ lc->frag.tbl, &lc->frag.dr,
+ mb, tm, iph, fh);
+ if (mb != NULL)
+ /* fix l3_len after reassembly. */
+ mb->l3_len = mb->l3_len - sizeof(*fh);
+ }
+ }
+
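+ /* compact the burst: mb == NULL means the fragment was consumed */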
+ pkt[k] = mb;
+ k += (mb != NULL);
+ }
+
+ /* some fragments were encountered, drain death row */
+ if (tm != 0)
+ rte_ip_frag_free_death_row(&lc->frag.dr, 0);
+
+ return k;
+}
+
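+/*
+ * Create this lcore's fragment table and install the reassembly
+ * RX callback on each of its RX queues.
+ */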
+static int
+reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
+{
+ int32_t sid;
+ uint32_t i;
+ uint64_t frag_cycles;
+ const struct lcore_rx_queue *rxq;
+ const struct rte_eth_rxtx_callback *cb;
+
+ /* create fragment table */
+ sid = rte_lcore_to_socket_id(cid);
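+ /* convert the fragment TTL from nanoseconds to TSC cycles */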
+ frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
+ NS_PER_S * frag_ttl_ns;
+
+ lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
+ FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
+ if (lc->frag.tbl == NULL) {
+ printf("%s(%u): failed to create fragment table of size: %u, "
+ "error code: %d\n",
+ __func__, cid, frag_tbl_sz, rte_errno);
+ return -ENOMEM;
+ }
+
+ /* set up reassembly RX callbacks for all queues */
+ for (i = 0; i != lc->nb_rx_queue; i++) {
+
+ rxq = lc->rx_queue_list + i;
+ cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
+ rx_callback, lc);
+ if (cb == NULL) {
+ printf("%s(%u): failed to install RX callback for "
+ "portid=%u, queueid=%u, error code: %d\n",
+ __func__, cid,
+ rxq->port_id, rxq->queue_id, rte_errno);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
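+/* Set up fragment reassembly on every lcore present in the configuration. */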
+static int
+reassemble_init(void)
+{
+ int32_t rc;
+ uint32_t i, lc;
+
+ rc = 0;
+ for (i = 0; i != nb_lcore_params; i++) {
+ lc = lcore_params[i].lcore_id;
+ rc = reassemble_lcore_init(lcore_conf + lc, lc);
+ if (rc != 0)
+ break;
+ }
+
+ return rc;
+}
+
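+/*
+ * Install a catch-all flow rule that directs all ingress ESP traffic
+ * to inline security processing.
+ */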
+static void
+create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
+{
+ struct rte_flow_action action[2];
+ struct rte_flow_item pattern[2];
+ struct rte_flow_attr attr = {0};
+ struct rte_flow_error err;
+ struct rte_flow *flow;
+ int ret;
+
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
+ return;
+
+ /* Add the default rte_flow to enable SECURITY for all ESP packets */
+
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
+ pattern[0].spec = NULL;
+ pattern[0].mask = NULL;
+ pattern[0].last = NULL;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ action[0].conf = NULL;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ action[1].conf = NULL;
+
+ attr.ingress = 1;
+
+ ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
+ if (ret)
+ return;
+
+ flow = rte_flow_create(port_id, &attr, pattern, action, &err);
+ if (flow == NULL)
+ return;
+
+ flow_info_tbl[port_id].rx_def_flow = flow;
+ RTE_LOG(INFO, IPSEC,
+ "Created default flow enabling SECURITY for all ESP traffic on port %d\n",
+ port_id);
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ force_quit = true;
+ }
+}
+
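+/* Abort if any SA uses a session type other than inline protocol. */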
+static void
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+{
+ struct rte_ipsec_session *ips;
+ int32_t i;
+
+ if (!sa || !nb_sa)
+ return;
+
+ for (i = 0; i < nb_sa; i++) {
+ ips = ipsec_get_primary_session(&sa[i]);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ rte_exit(EXIT_FAILURE, "Event mode supports only "
+ "inline protocol sessions\n");
+ }
+}
+
+static int32_t
+check_event_mode_params(struct eh_conf *eh_conf)
+{
+ struct eventmode_conf *em_conf = NULL;
+ struct lcore_params *params;
+ uint16_t portid;
+
+ if (!eh_conf || !eh_conf->mode_params)
+ return -EINVAL;
+
+ /* Get eventmode conf */
+ em_conf = eh_conf->mode_params;
+
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
+ em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
+ printf("error: option --event-schedule-type applies only to "
+ "event mode\n");
+ return -EINVAL;
+ }
+
+ if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
+ return 0;
+
+ /* Set schedule type to ORDERED if it wasn't explicitly set by user */
+ if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+
+ /*
+ * Event mode currently supports only inline protocol sessions.
+ * If any other session type is configured, exit with an error.
+ */
+ ev_mode_sess_verify(sa_in, nb_sa_in);
+ ev_mode_sess_verify(sa_out, nb_sa_out);
+
+ /* Option --config does not apply to event mode */
+ if (nb_lcore_params > 0) {
+ printf("error: option --config applies only to poll mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * To reuse the same port_init routine for both poll and event
+ * modes, initialize lcore_params with one queue for each eth port.
+ */
+ lcore_params = lcore_params_array;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ params = &lcore_params[nb_lcore_params++];
+ params->port_id = portid;
+ params->queue_id = 0;
+ params->lcore_id = rte_get_next_lcore(0, 0, 1);
+ }
+
+ return 0;
+}
+
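+/* Destroy the inline crypto/protocol security sessions of all valid SAs. */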
+static void
+inline_sessions_free(struct sa_ctx *sa_ctx)
+{
+ struct rte_ipsec_session *ips;
+ struct ipsec_sa *sa;
+ int32_t ret;
+ uint32_t i;
+
+ if (!sa_ctx)
+ return;
+
+ for (i = 0; i < sa_ctx->nb_sa; i++) {
+
+ sa = &sa_ctx->sa[i];
+ if (!sa->spi)
+ continue;
+
+ ips = ipsec_get_primary_session(sa);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
+ ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ continue;
+
+ if (!rte_eth_dev_is_valid_port(sa->portid))
+ continue;
+
+ ret = rte_security_session_destroy(
+ rte_eth_dev_get_sec_ctx(sa->portid),
+ ips->security.ses);
+ if (ret)
+ RTE_LOG(ERR, IPSEC, "Failed to destroy security "
+ "session type %d, spi %d\n",
+ ips->type, sa->spi);
+ }
+}
+
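+/*
+ * Estimate the mbuf pool size from the RX/TX ring sizes, per-lcore
+ * bursts and mempool caches, crypto queue descriptors and fragment
+ * table capacity, with a floor of 8192 mbufs.
+ */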
+static uint32_t
+calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
+ uint32_t nb_txq)
+{
+ return RTE_MAX((nb_rxq * nb_rxd +
+ nb_ports * nb_lcores * MAX_PKT_BURST +
+ nb_ports * nb_txq * nb_txd +
+ nb_lcores * MEMPOOL_CACHE_SIZE +
+ nb_crypto_qp * CDEV_QUEUE_DESC +
+ nb_lcores * frag_tbl_sz *
+ FRAG_TBL_BUCKET_ENTRIES),
+ 8192U);
+}
+
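+/* Telemetry callback for /examples/ipsec-secgw/stats (optional core id param). */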
+static int
+handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
+ unsigned int coreid;
+
+ rte_tel_data_start_dict(data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0)
+ return -EINVAL;
+
+ total_pkts_dropped = core_statistics[coreid].dropped;
+ total_pkts_tx = core_statistics[coreid].tx;
+ total_pkts_rx = core_statistics[coreid].rx;
+
+ } else {
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ continue;
+
+ total_pkts_dropped += core_statistics[coreid].dropped;
+ total_pkts_tx += core_statistics[coreid].tx;
+ total_pkts_rx += core_statistics[coreid].rx;
+ }
+ }
+
+ /* add telemetry key/value pairs */
+ rte_tel_data_add_dict_u64(data, "packets received",
+ total_pkts_rx);
+
+ rte_tel_data_add_dict_u64(data, "packets transmitted",
+ total_pkts_tx);
+
+ rte_tel_data_add_dict_u64(data, "packets dropped",
+ total_pkts_dropped);
+
+ return 0;
+}
+
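+/* Accumulate the counters of one enabled lcore into *total. */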
+static void
+update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
+{
+ struct ipsec_core_statistics *lcore_stats;
+
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ return;
+
+ lcore_stats = &core_statistics[coreid];
+
+ total->rx += lcore_stats->rx;
+ total->dropped += lcore_stats->dropped;
+ total->tx += lcore_stats->tx;
+
+ /* outbound stats */
+ total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
+ total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
+ total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;
+
+ total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
+ total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
+ total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;
+
+ total->outbound.sad.miss += lcore_stats->outbound.sad.miss;
+
+ /* inbound stats */
+ total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
+ total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
+ total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;
+
+ total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
+ total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
+ total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;
+
+ total->inbound.sad.miss += lcore_stats->inbound.sad.miss;
+
+ /* routing stats */
+ total->lpm4.miss += lcore_stats->lpm4.miss;
+ total->lpm6.miss += lcore_stats->lpm6.miss;
+}
+
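+/* Sum stats for one core, or for all enabled cores when coreid == UINT32_MAX. */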
+static void
+update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
+{
+ memset(total, 0, sizeof(*total));
+
+ if (coreid != UINT32_MAX) {
+ update_lcore_statistics(total, coreid);
+ } else {
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
+ update_lcore_statistics(total, coreid);
+ }
+}
+
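+/* Telemetry callback for /examples/ipsec-secgw/stats/outbound. */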
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *spd4_data = rte_tel_data_alloc();
+ struct rte_tel_data *spd6_data = rte_tel_data_alloc();
+ struct rte_tel_data *sad_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+
+ rte_tel_data_start_dict(spd4_data);
+ rte_tel_data_start_dict(spd6_data);
+ rte_tel_data_start_dict(sad_data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add spd 4 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd4_data, "protect",
+ total_stats.outbound.spd4.protect);
+ rte_tel_data_add_dict_u64(spd4_data, "bypass",
+ total_stats.outbound.spd4.bypass);
+ rte_tel_data_add_dict_u64(spd4_data, "discard",
+ total_stats.outbound.spd4.discard);
+
+ rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
+
+ /* add spd 6 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd6_data, "protect",
+ total_stats.outbound.spd6.protect);
+ rte_tel_data_add_dict_u64(spd6_data, "bypass",
+ total_stats.outbound.spd6.bypass);
+ rte_tel_data_add_dict_u64(spd6_data, "discard",
+ total_stats.outbound.spd6.discard);
+
+ rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
+
+ /* add sad telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(sad_data, "miss",
+ total_stats.outbound.sad.miss);
+
+ rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
+}
+
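+/* Telemetry callback for /examples/ipsec-secgw/stats/inbound. */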
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *spd4_data = rte_tel_data_alloc();
+ struct rte_tel_data *spd6_data = rte_tel_data_alloc();
+ struct rte_tel_data *sad_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+ rte_tel_data_start_dict(spd4_data);
+ rte_tel_data_start_dict(spd6_data);
+ rte_tel_data_start_dict(sad_data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add sad telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(sad_data, "miss",
+ total_stats.inbound.sad.miss);
+
+ rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
+
+ /* add spd 4 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd4_data, "protect",
+ total_stats.inbound.spd4.protect);
+ rte_tel_data_add_dict_u64(spd4_data, "bypass",
+ total_stats.inbound.spd4.bypass);
+ rte_tel_data_add_dict_u64(spd4_data, "discard",
+ total_stats.inbound.spd4.discard);
+
+ rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
+
+ /* add spd 6 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd6_data, "protect",
+ total_stats.inbound.spd6.protect);
+ rte_tel_data_add_dict_u64(spd6_data, "bypass",
+ total_stats.inbound.spd6.bypass);
+ rte_tel_data_add_dict_u64(spd6_data, "discard",
+ total_stats.inbound.spd6.discard);
+
+ rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
+}
+
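+/* Telemetry callback for /examples/ipsec-secgw/stats/routing. */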
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
+ struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!lpm4_data || !lpm6_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+ rte_tel_data_start_dict(lpm4_data);
+ rte_tel_data_start_dict(lpm6_data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add lpm 4 telemetry key/value pairs */
+ rte_tel_data_add_dict_u64(lpm4_data, "miss",
+ total_stats.lpm4.miss);
+
+ rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);
+
+ /* add lpm 6 telemetry key/value pairs */
+ rte_tel_data_add_dict_u64(lpm6_data, "miss",
+ total_stats.lpm6.miss);
+
+ rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(lpm4_data);
+ rte_tel_data_free(lpm6_data);
+ }
+ return rc;
+}
+
+static void
+ipsec_secgw_telemetry_init(void)
+{
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
+ handle_telemetry_cmd_ipsec_secgw_stats,
+ "Returns global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
+ handle_telemetry_cmd_ipsec_secgw_stats_outbound,
+ "Returns outbound global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
+ handle_telemetry_cmd_ipsec_secgw_stats_inbound,
+ "Returns inbound global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
+ handle_telemetry_cmd_ipsec_secgw_stats_routing,
+ "Returns routing stats. "
+ "Optional Parameters: int <logical core id>");
+}
+
+int32_t
+main(int32_t argc, char **argv)
+{
+ int32_t ret;
+ uint32_t lcore_id, nb_txq, nb_rxq = 0;
+ uint32_t cdev_id;
+ uint32_t i;
+ uint8_t socket_id;
+ uint16_t portid, nb_crypto_qp, nb_ports = 0;
+ uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
+ uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
+ struct eh_conf *eh_conf = NULL;
+ size_t sess_sz;
+
+ nb_bufs_in_pool = 0;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+ argc -= ret;
+ argv += ret;
+
+ force_quit = false;
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ /* initialize event helper configuration */
+ eh_conf = eh_conf_init();
+ if (eh_conf == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to init event helper config");
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv, eh_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid parameters\n");
+
+ ipsec_secgw_telemetry_init();
+
+ /* parse configuration file */
+ if (parse_cfg_file(cfgfile) < 0) {
+ printf("parsing file \"%s\" failed\n",
+ optarg);
+ print_usage(argv[0]);
+ return -1;
+ }
+
+ if ((unprotected_port_mask & enabled_port_mask) !=
+ unprotected_port_mask)
+ rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
+ unprotected_port_mask);
+
+ if (unprotected_port_mask && !nb_sa_in)
+ rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
+
+ if (check_poll_mode_params(eh_conf) < 0)
+ rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
+
+ if (check_event_mode_params(eh_conf) < 0)
+ rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");
+
+ ret = init_lcore_rx_queues();
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
+
+ nb_lcores = rte_lcore_count();
+
+ sess_sz = max_session_size();
+
+ /*
+ * In event mode, reserve at least as many crypto queue pairs
+ * as there are ethdev ports.
+ */
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
+ nb_crypto_qp = rte_eth_dev_count_avail();
+ else
+ nb_crypto_qp = 0;
+
+ nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
+
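+ /* derive a default mbuf pool size if not already configured */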
+ if (nb_bufs_in_pool == 0) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ nb_ports++;
+ nb_rxq += get_port_nb_rx_queues(portid);
+ }
+
+ nb_txq = nb_lcores;
+
+ nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
+ nb_rxq, nb_txq);
+ }
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;