+static inline int
+inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
+{
+ struct ipsec_sa *sa;
+
+ /* For inline protocol processing, the metadata in the event
+ * uniquely identifies the security session which raised the event.
+ * The application then needs the userdata it registered with the
+ * security session in order to process the event.
+ */
+
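+ /* Note: this sample is assumed to have stored the SA pointer as the
+ * session userdata (sess_conf.userdata) when the inline session was
+ * created, so the lookup below returns the SA that raised the event.
+ */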
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
+
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return -1;
+ }
+
+ /* Sequence number overflow. The SA needs to be re-established. */
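+ /* A real application would mark the SA as expired here and trigger
+ * a rekey, e.g. through its IKE daemon; this sample only consumes
+ * the event.
+ */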
+ RTE_SET_USED(sa);
+ return 0;
+}
+
+static int
+inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param)
+{
+ uint64_t md;
+ struct rte_eth_event_ipsec_desc *event_desc = NULL;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(port_id);
+
+ RTE_SET_USED(param);
+
+ if (type != RTE_ETH_EVENT_IPSEC)
+ return -1;
+
+ event_desc = ret_param;
+ if (event_desc == NULL) {
+ printf("Event descriptor not set\n");
+ return -1;
+ }
+
+ md = event_desc->metadata;
+
+ if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
+ return inline_ipsec_event_esn_overflow(ctx, md);
+ else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
+ printf("Invalid IPsec event reported\n");
+ return -1;
+ }
+
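+ /* other valid event subtypes are not handled by this application */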
+ return -1;
+}
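+
+/*
+ * A minimal sketch (assumption: registration happens elsewhere in this
+ * application, during port setup) of how the callback above is wired
+ * to the ethdev:
+ *
+ *   rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_IPSEC,
+ *           inline_ipsec_event_callback, NULL);
+ */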
+
+static uint16_t
+rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ uint64_t tm;
+ uint32_t i, k;
+ struct lcore_conf *lc;
+ struct rte_mbuf *mb;
+ struct rte_ether_hdr *eth;
+
+ lc = user_param;
+ k = 0;
+ tm = 0;
+
+ for (i = 0; i != nb_pkts; i++) {
+
+ mb = pkt[i];
+ eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ struct rte_ipv4_hdr *iph;
+
+ iph = (struct rte_ipv4_hdr *)(eth + 1);
+ if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
+
+ mb->l2_len = sizeof(*eth);
+ mb->l3_len = sizeof(*iph);
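+ /* take the TSC timestamp lazily: once per burst and
+ * only when a fragment is actually seen
+ */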
+ tm = (tm != 0) ? tm : rte_rdtsc();
+ mb = rte_ipv4_frag_reassemble_packet(
+ lc->frag.tbl, &lc->frag.dr,
+ mb, tm, iph);
+
+ if (mb != NULL) {
+ /* fix the IP checksum after reassembly */
+ iph = rte_pktmbuf_mtod_offset(mb,
+ struct rte_ipv4_hdr *,
+ mb->l2_len);
+ iph->hdr_checksum = 0;
+ iph->hdr_checksum = rte_ipv4_cksum(iph);
+ }
+ }
+ } else if (eth->ether_type ==
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+
+ struct rte_ipv6_hdr *iph;
+ struct ipv6_extension_fragment *fh;
+
+ iph = (struct rte_ipv6_hdr *)(eth + 1);
+ fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
+ if (fh != NULL) {
+ mb->l2_len = sizeof(*eth);
+ mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
+ sizeof(*fh);
+ tm = (tm != 0) ? tm : rte_rdtsc();
+ mb = rte_ipv6_frag_reassemble_packet(
+ lc->frag.tbl, &lc->frag.dr,
+ mb, tm, iph, fh);
+ if (mb != NULL)
+ /* fix l3_len after reassembly */
+ mb->l3_len = mb->l3_len - sizeof(*fh);
+ }
+ }
+
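+ /* compact the burst in place; mb is NULL when the fragment was
+ * consumed by the reassembly table and no complete packet is
+ * ready yet
+ */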
+ pkt[k] = mb;
+ k += (mb != NULL);
+ }
+
+ /* some fragments were encountered, so drain the death row */
+ if (tm != 0)
+ rte_ip_frag_free_death_row(&lc->frag.dr, 0);
+
+ return k;
+}
+
+static int
+reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
+{
+ int32_t sid;
+ uint32_t i;
+ uint64_t frag_cycles;
+ const struct lcore_rx_queue *rxq;
+ const struct rte_eth_rxtx_callback *cb;
+
+ /* create fragment table */
+ sid = rte_lcore_to_socket_id(cid);
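+ /* convert the fragment TTL from nanoseconds to TSC cycles; the
+ * cycles-per-ns rate is rounded up so a short TTL cannot truncate
+ * to zero
+ */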
+ frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
+ NS_PER_S * frag_ttl_ns;
+
+ lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
+ FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
+ if (lc->frag.tbl == NULL) {
+ printf("%s(%u): failed to create fragment table of size: %u, "
+ "error code: %d\n",
+ __func__, cid, frag_tbl_sz, rte_errno);
+ return -ENOMEM;
+ }
+
+ /* setup reassemble RX callbacks for all queues */
+ for (i = 0; i != lc->nb_rx_queue; i++) {
+
+ rxq = lc->rx_queue_list + i;
+ cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
+ rx_callback, lc);
+ if (cb == NULL) {
+ printf("%s(%u): failed to install RX callback for "
+ "portid=%u, queueid=%u, error code: %d\n",
+ __func__, cid,
+ rxq->port_id, rxq->queue_id, rte_errno);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
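+
+/*
+ * Sketch of the matching teardown, which this sample does not perform:
+ * the callback would be removed with rte_eth_remove_rx_callback() and
+ * the table freed with rte_ip_frag_table_destroy():
+ *
+ *   rte_eth_remove_rx_callback(rxq->port_id, rxq->queue_id, cb);
+ *   rte_ip_frag_table_destroy(lc->frag.tbl);
+ */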
+
+static int
+reassemble_init(void)
+{
+ int32_t rc;
+ uint32_t i, lc;
+
+ rc = 0;
+ for (i = 0; i != nb_lcore_params; i++) {
+ lc = lcore_params[i].lcore_id;
+ rc = reassemble_lcore_init(lcore_conf + lc, lc);
+ if (rc != 0)
+ break;
+ }
+
+ return rc;
+}
+
+static void
+create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
+{
+ struct rte_flow_action action[2];
+ struct rte_flow_item pattern[2];
+ struct rte_flow_attr attr = {0};
+ struct rte_flow_error err;
+ struct rte_flow *flow;
+ int ret;
+
+ if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return;
+
+ /* Add the default rte_flow to enable SECURITY for all ESP packets */
+
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
+ pattern[0].spec = NULL;
+ pattern[0].mask = NULL;
+ pattern[0].last = NULL;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ action[0].conf = NULL;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ action[1].conf = NULL;
+
+ attr.ingress = 1;
+
+ ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
+ if (ret)
+ return;
+
+ flow = rte_flow_create(port_id, &attr, pattern, action, &err);
+ if (flow == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "Failed to create default flow on port %d\n", port_id);
+ return;
+ }
+
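+ /* cache the flow handle; a teardown path would release it with
+ * rte_flow_destroy(port_id, flow, &err) (sketch, not done here)
+ */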
+ flow_info_tbl[port_id].rx_def_flow = flow;
+ RTE_LOG(INFO, IPSEC,
+ "Created default flow enabling SECURITY for all ESP traffic on port %d\n",
+ port_id);
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ force_quit = true;
+ }
+}
+
+static void
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+{
+ struct rte_ipsec_session *ips;
+ int32_t i;
+
+ if (!sa || !nb_sa)
+ return;
+
+ for (i = 0; i < nb_sa; i++) {
+ ips = ipsec_get_primary_session(&sa[i]);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ rte_exit(EXIT_FAILURE, "Event mode supports only "
+ "inline protocol sessions\n");
+ }
+}
+
+static int32_t
+check_event_mode_params(struct eh_conf *eh_conf)
+{
+ struct eventmode_conf *em_conf = NULL;
+ struct lcore_params *params;
+ uint16_t portid;
+
+ if (!eh_conf || !eh_conf->mode_params)
+ return -EINVAL;
+
+ /* Get eventmode conf */
+ em_conf = eh_conf->mode_params;
+
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
+ em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
+ printf("error: option --event-schedule-type applies only to "
+ "event mode\n");
+ return -EINVAL;
+ }
+
+ if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
+ return 0;
+
+ /* Set the schedule type to ORDERED if not explicitly set by the user */
+ if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+
+ /*
+ * Event mode currently supports only inline protocol sessions.
+ * If any other session type is configured, exit with an error.
+ */
+ ev_mode_sess_verify(sa_in, nb_sa_in);
+ ev_mode_sess_verify(sa_out, nb_sa_out);
+
+ /* Option --config does not apply to event mode */
+ if (nb_lcore_params > 0) {
+ printf("error: option --config applies only to poll mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * In order to use the same port_init routine for both poll and event
+ * modes, initialize lcore_params with one queue for each eth port.
+ */
+ lcore_params = lcore_params_array;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ params = &lcore_params[nb_lcore_params++];
+ params->port_id = portid;
+ params->queue_id = 0;
+ params->lcore_id = rte_get_next_lcore(0, 0, 1);
+ }
+
+ return 0;
+}
+
+static void
+inline_sessions_free(struct sa_ctx *sa_ctx)
+{
+ struct rte_ipsec_session *ips;
+ struct ipsec_sa *sa;
+ int32_t ret;
+ uint32_t i;
+
+ if (!sa_ctx)
+ return;
+
+ for (i = 0; i < sa_ctx->nb_sa; i++) {
+
+ sa = &sa_ctx->sa[i];
+ if (!sa->spi)
+ continue;
+
+ ips = ipsec_get_primary_session(sa);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
+ ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ continue;
+
+ if (!rte_eth_dev_is_valid_port(sa->portid))
+ continue;
+
+ ret = rte_security_session_destroy(
+ rte_eth_dev_get_sec_ctx(sa->portid),
+ ips->security.ses);
+ if (ret)
+ RTE_LOG(ERR, IPSEC, "Failed to destroy security "
+ "session type %d, spi %d\n",
+ ips->type, sa->spi);
+ }
+}
+
+static uint32_t
+calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
+ uint32_t nb_txq)
+{
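+ /* account for packets in flight on the RX rings, per-lcore burst
+ * buffers, TX ring occupancy, mempool per-lcore caches, crypto
+ * queue descriptors and fragments parked in the reassembly tables,
+ * with a floor of 8192 mbufs
+ */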
+ return RTE_MAX((nb_rxq * nb_rxd +
+ nb_ports * nb_lcores * MAX_PKT_BURST +
+ nb_ports * nb_txq * nb_txd +
+ nb_lcores * MEMPOOL_CACHE_SIZE +
+ nb_crypto_qp * CDEV_QUEUE_DESC +
+ nb_lcores * frag_tbl_sz *
+ FRAG_TBL_BUCKET_ENTRIES),
+ 8192U);
+}
+