1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright (C) 2020 Marvell International Ltd.
5 #include <rte_event_eth_tx_adapter.h>
7 #include "event_helper.h"
9 #include "ipsec-secgw.h"
12 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
14 /* Save the destination port in the mbuf */
17 /* Save eth queue for Tx */
18 rte_event_eth_tx_adapter_txq_set(m, 0);
22 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
23 struct rte_security_session **sess_tbl, uint16_t size)
25 struct rte_ipsec_session *pri_sess;
32 for (i = 0; i < sa_out->nb_sa; i++) {
38 pri_sess = ipsec_get_primary_session(sa);
43 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
45 RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
50 if (sa->portid >= size) {
52 "Port id >= than table size %d, %d\n",
57 /* Use only first inline session found for a given port */
58 if (sess_tbl[sa->portid])
60 sess_tbl[sa->portid] = pri_sess->security.ses;
/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode selected.
 */
70 /* Workers registered */
71 #define IPSEC_EVENTMODE_WORKERS 1
75 * Operating parameters : non-burst - Tx internal port - driver mode
78 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
81 struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
82 unsigned int nb_rx = 0;
89 /* Check if we have links registered for this lcore */
91 /* No links registered - exit */
96 lcore_id = rte_lcore_id();
99 socket_id = rte_lcore_to_socket_id(lcore_id);
102 * Prepare security sessions table. In outbound driver mode
103 * we always use first session configured for a given port
105 prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
109 "Launching event mode worker (non-burst - Tx internal port - "
110 "driver mode) on lcore %d\n", lcore_id);
112 /* We have valid links */
114 /* Check if it's single link */
117 "Multiple links not supported. Using first link\n");
120 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
121 links[0].event_port_id);
122 while (!force_quit) {
123 /* Read packet from event queues */
124 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
125 links[0].event_port_id,
128 0 /* timeout_ticks */);
136 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
139 ipsec_event_pre_forward(pkt, port_id);
141 if (!is_unprotected_port(port_id)) {
143 if (unlikely(!sess_tbl[port_id])) {
144 rte_pktmbuf_free(pkt);
148 /* Save security session */
149 pkt->udata64 = (uint64_t) sess_tbl[port_id];
151 /* Mark the packet for Tx security offload */
152 pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
156 * Since tx internal port is available, events can be
157 * directly enqueued to the adapter and it would be
158 * internally submitted to the eth device.
160 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
161 links[0].event_port_id,
169 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
171 struct eh_app_worker_params *wrkr;
172 uint8_t nb_wrkr_param = 0;
177 /* Non-burst - Tx internal port - driver mode */
178 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
179 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
180 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
181 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
184 return nb_wrkr_param;
188 ipsec_eventmode_worker(struct eh_conf *conf)
190 struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
192 uint8_t nb_wrkr_param;
194 /* Populate l2fwd_wrkr params */
195 nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
198 * Launch correct worker after checking
199 * the event device's capabilities.
201 eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
204 int ipsec_launch_one_lcore(void *args)
206 struct eh_conf *conf;
208 conf = (struct eh_conf *)args;
210 if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
211 /* Run in poll mode */
212 ipsec_poll_mode_worker();
213 } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
214 /* Run in event mode */
215 ipsec_eventmode_worker(conf);