+/*
+ * Install a catch-all ingress rte_flow rule on @port_id that sends every
+ * ESP packet to the SECURITY action, enabling inline IPsec processing for
+ * all ESP traffic by default.
+ *
+ * No-op unless @rx_offloads has DEV_RX_OFFLOAD_SECURITY set. Failure of
+ * rte_flow_validate()/rte_flow_create() is non-fatal: the function simply
+ * returns and no default flow is recorded for the port.
+ */
+static void
+create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
+{
+ struct rte_flow_action action[2];
+ struct rte_flow_item pattern[2];
+ struct rte_flow_attr attr = {0};
+ struct rte_flow_error err;
+ struct rte_flow *flow;
+ int ret;
+
+ if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return;
+
+ /* Add the default rte_flow to enable SECURITY for all ESP packets */
+
+ /* Match any ESP packet: NULL spec/mask means "wildcard on this item" */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
+ pattern[0].spec = NULL;
+ pattern[0].mask = NULL;
+ pattern[0].last = NULL;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ action[0].conf = NULL;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ action[1].conf = NULL;
+
+ attr.ingress = 1;
+
+ /* Validate first so unsupported PMDs are skipped silently */
+ ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
+ if (ret)
+ return;
+
+ flow = rte_flow_create(port_id, &attr, pattern, action, &err);
+ if (flow == NULL)
+ return;
+
+ /* Remember the flow handle so it can be destroyed at teardown */
+ flow_info_tbl[port_id].rx_def_flow = flow;
+ RTE_LOG(INFO, IPSEC,
+ "Created default flow enabling SECURITY for all ESP traffic on port %d\n",
+ port_id);
+}
+
+/*
+ * SIGINT/SIGTERM handler: request a graceful shutdown by setting the
+ * global force_quit flag, which the worker loops poll to exit.
+ */
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ force_quit = true;
+ }
+}
+
+/*
+ * Verify that every SA in @sa[0..nb_sa) uses an inline-protocol security
+ * session, the only session type event mode supports. Exits the
+ * application on the first violation; silently returns when @sa is NULL
+ * or @nb_sa is zero.
+ */
+static void
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+{
+ struct rte_ipsec_session *ips;
+ int32_t i;
+
+ if (!sa || !nb_sa)
+ return;
+
+ for (i = 0; i < nb_sa; i++) {
+ ips = ipsec_get_primary_session(&sa[i]);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ rte_exit(EXIT_FAILURE, "Event mode supports only "
+ "inline protocol sessions\n");
+ }
+
+}
+
+/*
+ * Validate command-line parameters against the selected packet-transfer
+ * mode and finish event-mode setup.
+ *
+ * Returns 0 on success, -EINVAL on invalid/conflicting options:
+ *   - --event-schedule-type given in poll mode
+ *   - --config given in event mode
+ * In event mode, also defaults the schedule type to ORDERED and
+ * synthesizes a one-queue-per-port lcore_params table so that the common
+ * port_init path works unchanged. Exits (via ev_mode_sess_verify) if any
+ * configured SA is not an inline-protocol session.
+ */
+static int32_t
+check_event_mode_params(struct eh_conf *eh_conf)
+{
+ struct eventmode_conf *em_conf = NULL;
+ struct lcore_params *params;
+ uint16_t portid;
+
+ if (!eh_conf || !eh_conf->mode_params)
+ return -EINVAL;
+
+ /* Get eventmode conf */
+ em_conf = eh_conf->mode_params;
+
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
+ em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
+ printf("error: option --event-schedule-type applies only to "
+ "event mode\n");
+ return -EINVAL;
+ }
+
+ /* Nothing more to check/set up for poll mode */
+ if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
+ return 0;
+
+ /* Set schedule type to ORDERED if it wasn't explicitly set by user */
+ if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+
+ /*
+ * Event mode currently supports only inline protocol sessions.
+ * If there are other types of sessions configured then exit with
+ * error.
+ */
+ ev_mode_sess_verify(sa_in, nb_sa_in);
+ ev_mode_sess_verify(sa_out, nb_sa_out);
+
+
+ /* Option --config does not apply to event mode */
+ if (nb_lcore_params > 0) {
+ printf("error: option --config applies only to poll mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * In order to use the same port_init routine for both poll and event
+ * modes initialize lcore_params with one queue for each eth port
+ */
+ /*
+ * NOTE(review): no bounds check that nb_lcore_params stays within
+ * lcore_params_array — presumably the array is sized for the maximum
+ * port count; confirm against its definition.
+ */
+ lcore_params = lcore_params_array;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ params = &lcore_params[nb_lcore_params++];
+ params->port_id = portid;
+ params->queue_id = 0;
+ /* Assign the first enabled worker lcore (skip the main lcore) */
+ params->lcore_id = rte_get_next_lcore(0, 0, 1);
+ }
+
+ return 0;
+}
+
+/*
+ * Destroy all inline (crypto or protocol) security sessions held by the
+ * SAs in @sa_ctx. Lookaside sessions, unused SA slots (spi == 0) and SAs
+ * bound to ports that are no longer valid are skipped. Destroy failures
+ * are logged but do not abort the loop.
+ */
+static void
+inline_sessions_free(struct sa_ctx *sa_ctx)
+{
+ struct rte_ipsec_session *ips;
+ struct ipsec_sa *sa;
+ int32_t ret;
+ uint32_t i;
+
+ if (!sa_ctx)
+ return;
+
+ for (i = 0; i < sa_ctx->nb_sa; i++) {
+
+ sa = &sa_ctx->sa[i];
+ /* spi == 0 marks an unused SA entry */
+ if (!sa->spi)
+ continue;
+
+ ips = ipsec_get_primary_session(sa);
+ /* Only inline sessions are owned by the ethdev security ctx */
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
+ ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ continue;
+
+ if (!rte_eth_dev_is_valid_port(sa->portid))
+ continue;
+
+ ret = rte_security_session_destroy(
+ rte_eth_dev_get_sec_ctx(sa->portid),
+ ips->security.ses);
+ if (ret)
+ RTE_LOG(ERR, IPSEC, "Failed to destroy security "
+ "session type %d, spi %d\n",
+ ips->type, sa->spi);
+ }
+}
+
+/*
+ * Size the packet mbuf pool: sum the worst-case mbuf demand of RX rings,
+ * per-lcore burst buffers, TX rings, mempool caches, crypto-device queue
+ * descriptors and the IP fragmentation table, with a floor of 8192.
+ *
+ * NOTE(review): the sum is computed in 32-bit arithmetic — with very
+ * large descriptor/queue counts it could wrap; confirm the inputs are
+ * bounded well below UINT32_MAX.
+ */
+static uint32_t
+calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
+ uint32_t nb_txq)
+{
+ return RTE_MAX((nb_rxq * nb_rxd +
+ nb_ports * nb_lcores * MAX_PKT_BURST +
+ nb_ports * nb_txq * nb_txd +
+ nb_lcores * MEMPOOL_CACHE_SIZE +
+ nb_crypto_qp * CDEV_QUEUE_DESC +
+ nb_lcores * frag_tbl_sz *
+ FRAG_TBL_BUCKET_ENTRIES),
+ 8192U);
+}
+