/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
6 #include <rte_event_eth_tx_adapter.h>
10 #include "event_helper.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
/*
 * Per-ethdev-port data used by the driver-mode worker: the single inline
 * outbound security session selected for the port and the security context
 * it belongs to (both needed by rte_security_set_pkt_metadata()).
 */
struct port_drv_mode_data {
	struct rte_security_session *sess;
	struct rte_security_ctx *ctx;
};
20 static inline enum pkt_type
21 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
23 struct rte_ether_hdr *eth;
25 eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
26 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
27 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
28 offsetof(struct ip, ip_p));
29 if (**nlp == IPPROTO_ESP)
30 return PKT_TYPE_IPSEC_IPV4;
32 return PKT_TYPE_PLAIN_IPV4;
33 } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
34 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
35 offsetof(struct ip6_hdr, ip6_nxt));
36 if (**nlp == IPPROTO_ESP)
37 return PKT_TYPE_IPSEC_IPV6;
39 return PKT_TYPE_PLAIN_IPV6;
42 /* Unknown/Unsupported type */
43 return PKT_TYPE_INVALID;
47 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
49 struct rte_ether_hdr *ethhdr;
51 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
52 memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
53 memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
57 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
59 /* Save the destination port in the mbuf */
62 /* Save eth queue for Tx */
63 rte_event_eth_tx_adapter_txq_set(m, 0);
67 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
68 struct port_drv_mode_data *data,
71 struct rte_ipsec_session *pri_sess;
78 for (i = 0; i < sa_out->nb_sa; i++) {
84 pri_sess = ipsec_get_primary_session(sa);
89 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
91 RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
96 if (sa->portid >= size) {
98 "Port id >= than table size %d, %d\n",
103 /* Use only first inline session found for a given port */
104 if (data[sa->portid].sess)
106 data[sa->portid].sess = pri_sess->security.ses;
107 data[sa->portid].ctx = pri_sess->security.ctx;
112 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
116 if (unlikely(sp == NULL))
119 rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
120 DEFAULT_MAX_CATEGORIES);
122 if (unlikely(res == DISCARD))
124 else if (res == BYPASS) {
133 static inline uint16_t
134 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
141 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
142 dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
143 dst_ip = rte_be_to_cpu_32(dst_ip);
145 ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
153 return RTE_MAX_ETHPORTS;
156 /* TODO: To be tested */
157 static inline uint16_t
158 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
166 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
167 ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
168 memcpy(&dst_ip[0], ip6_dst, 16);
170 ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
178 return RTE_MAX_ETHPORTS;
181 static inline uint16_t
182 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
184 if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
185 return route4_pkt(pkt, rt->rt4_ctx);
186 else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
187 return route6_pkt(pkt, rt->rt6_ctx);
189 return RTE_MAX_ETHPORTS;
193 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
194 struct rte_event *ev)
196 struct ipsec_sa *sa = NULL;
197 struct rte_mbuf *pkt;
198 uint16_t port_id = 0;
203 /* Get pkt from event */
206 /* Check the packet type */
207 type = process_ipsec_get_pkt_type(pkt, &nlp);
210 case PKT_TYPE_PLAIN_IPV4:
211 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
212 if (unlikely(pkt->ol_flags &
213 PKT_RX_SEC_OFFLOAD_FAILED)) {
215 "Inbound security offload failed\n");
216 goto drop_pkt_and_exit;
218 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
221 /* Check if we have a match */
222 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
224 goto drop_pkt_and_exit;
228 case PKT_TYPE_PLAIN_IPV6:
229 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
230 if (unlikely(pkt->ol_flags &
231 PKT_RX_SEC_OFFLOAD_FAILED)) {
233 "Inbound security offload failed\n");
234 goto drop_pkt_and_exit;
236 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
239 /* Check if we have a match */
240 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
242 goto drop_pkt_and_exit;
247 RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
248 goto drop_pkt_and_exit;
251 /* Check if the packet has to be bypassed */
252 if (sa_idx == BYPASS)
253 goto route_and_send_pkt;
255 /* Validate sa_idx */
256 if (sa_idx >= ctx->sa_ctx->nb_sa)
257 goto drop_pkt_and_exit;
259 /* Else the packet has to be protected with SA */
261 /* If the packet was IPsec processed, then SA pointer should be set */
263 goto drop_pkt_and_exit;
265 /* SPI on the packet should match with the one in SA */
266 if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
267 goto drop_pkt_and_exit;
270 port_id = get_route(pkt, rt, type);
271 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
273 goto drop_pkt_and_exit;
275 /* else, we have a matching route */
277 /* Update mac addresses */
278 update_mac_addrs(pkt, port_id);
280 /* Update the event with the dest port */
281 ipsec_event_pre_forward(pkt, port_id);
282 return PKT_FORWARDED;
285 RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
286 rte_pktmbuf_free(pkt);
292 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
293 struct rte_event *ev)
295 struct rte_ipsec_session *sess;
296 struct sa_ctx *sa_ctx;
297 struct rte_mbuf *pkt;
298 uint16_t port_id = 0;
304 /* Get pkt from event */
307 /* Check the packet type */
308 type = process_ipsec_get_pkt_type(pkt, &nlp);
311 case PKT_TYPE_PLAIN_IPV4:
312 /* Check if we have a match */
313 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
315 goto drop_pkt_and_exit;
318 case PKT_TYPE_PLAIN_IPV6:
319 /* Check if we have a match */
320 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
322 goto drop_pkt_and_exit;
327 * Only plain IPv4 & IPv6 packets are allowed
328 * on protected port. Drop the rest.
330 RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
331 goto drop_pkt_and_exit;
334 /* Check if the packet has to be bypassed */
335 if (sa_idx == BYPASS) {
336 port_id = get_route(pkt, rt, type);
337 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
339 goto drop_pkt_and_exit;
341 /* else, we have a matching route */
345 /* Validate sa_idx */
346 if (sa_idx >= ctx->sa_ctx->nb_sa)
347 goto drop_pkt_and_exit;
349 /* Else the packet has to be protected */
352 sa_ctx = ctx->sa_ctx;
355 sa = &(sa_ctx->sa[sa_idx]);
357 /* Get IPsec session */
358 sess = ipsec_get_primary_session(sa);
360 /* Allow only inline protocol for now */
361 if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
362 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
363 goto drop_pkt_and_exit;
366 rte_security_set_pkt_metadata(sess->security.ctx,
367 sess->security.ses, pkt, NULL);
369 /* Mark the packet for Tx security offload */
370 pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
372 /* Get the port to which this pkt need to be submitted */
373 port_id = sa->portid;
376 /* Provide L2 len for Outbound processing */
377 pkt->l2_len = RTE_ETHER_HDR_LEN;
379 /* Update mac addresses */
380 update_mac_addrs(pkt, port_id);
382 /* Update the event with the dest port */
383 ipsec_event_pre_forward(pkt, port_id);
384 return PKT_FORWARDED;
387 RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
388 rte_pktmbuf_free(pkt);
/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode selected.
 */
399 /* Workers registered */
400 #define IPSEC_EVENTMODE_WORKERS 2
404 * Operating parameters : non-burst - Tx internal port - driver mode
407 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
410 struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
411 unsigned int nb_rx = 0;
412 struct rte_mbuf *pkt;
418 /* Check if we have links registered for this lcore */
420 /* No links registered - exit */
424 memset(&data, 0, sizeof(struct port_drv_mode_data));
427 lcore_id = rte_lcore_id();
430 socket_id = rte_lcore_to_socket_id(lcore_id);
433 * Prepare security sessions table. In outbound driver mode
434 * we always use first session configured for a given port
436 prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
440 "Launching event mode worker (non-burst - Tx internal port - "
441 "driver mode) on lcore %d\n", lcore_id);
443 /* We have valid links */
445 /* Check if it's single link */
448 "Multiple links not supported. Using first link\n");
451 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
452 links[0].event_port_id);
453 while (!force_quit) {
454 /* Read packet from event queues */
455 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
456 links[0].event_port_id,
459 0 /* timeout_ticks */);
467 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
470 ipsec_event_pre_forward(pkt, port_id);
472 if (!is_unprotected_port(port_id)) {
474 if (unlikely(!data[port_id].sess)) {
475 rte_pktmbuf_free(pkt);
479 /* Save security session */
480 rte_security_set_pkt_metadata(data[port_id].ctx,
481 data[port_id].sess, pkt,
484 /* Mark the packet for Tx security offload */
485 pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
487 /* Provide L2 len for Outbound processing */
488 pkt->l2_len = RTE_ETHER_HDR_LEN;
492 * Since tx internal port is available, events can be
493 * directly enqueued to the adapter and it would be
494 * internally submitted to the eth device.
496 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
497 links[0].event_port_id,
506 * Operating parameters : non-burst - Tx internal port - app mode
509 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
512 struct lcore_conf_ev_tx_int_port_wrkr lconf;
513 unsigned int nb_rx = 0;
519 /* Check if we have links registered for this lcore */
521 /* No links registered - exit */
525 /* We have valid links */
528 lcore_id = rte_lcore_id();
531 socket_id = rte_lcore_to_socket_id(lcore_id);
533 /* Save routing table */
534 lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
535 lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
536 lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
537 lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
538 lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
539 lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
540 lconf.inbound.session_priv_pool =
541 socket_ctx[socket_id].session_priv_pool;
542 lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
543 lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
544 lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
545 lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
546 lconf.outbound.session_priv_pool =
547 socket_ctx[socket_id].session_priv_pool;
550 "Launching event mode worker (non-burst - Tx internal port - "
551 "app mode) on lcore %d\n", lcore_id);
553 /* Check if it's single link */
556 "Multiple links not supported. Using first link\n");
559 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
560 links[0].event_port_id);
562 while (!force_quit) {
563 /* Read packet from event queues */
564 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
565 links[0].event_port_id,
568 0 /* timeout_ticks */);
573 if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
574 RTE_LOG(ERR, IPSEC, "Invalid event type %u",
580 if (is_unprotected_port(ev.mbuf->port))
581 ret = process_ipsec_ev_inbound(&lconf.inbound,
584 ret = process_ipsec_ev_outbound(&lconf.outbound,
587 /* The pkt has been dropped */
591 * Since tx internal port is available, events can be
592 * directly enqueued to the adapter and it would be
593 * internally submitted to the eth device.
595 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
596 links[0].event_port_id,
604 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
606 struct eh_app_worker_params *wrkr;
607 uint8_t nb_wrkr_param = 0;
612 /* Non-burst - Tx internal port - driver mode */
613 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
614 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
615 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
616 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
620 /* Non-burst - Tx internal port - app mode */
621 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
622 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
623 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
624 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
627 return nb_wrkr_param;
631 ipsec_eventmode_worker(struct eh_conf *conf)
633 struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
635 uint8_t nb_wrkr_param;
637 /* Populate l2fwd_wrkr params */
638 nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
641 * Launch correct worker after checking
642 * the event device's capabilities.
644 eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
647 int ipsec_launch_one_lcore(void *args)
649 struct eh_conf *conf;
651 conf = (struct eh_conf *)args;
653 if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
654 /* Run in poll mode */
655 ipsec_poll_mode_worker();
656 } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
657 /* Run in event mode */
658 ipsec_eventmode_worker(conf);