1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright (C) 2020 Marvell International Ltd.
6 #include <rte_event_eth_tx_adapter.h>
10 #include "event_helper.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
/* Per-ethdev-port data used by the outbound driver-mode workers: the
 * inline security session applied to all egress traffic on that port and
 * the security context it belongs to. Filled by prepare_out_sessions_tbl().
 */
15 struct port_drv_mode_data {
16 struct rte_security_session *sess;
17 struct rte_security_ctx *ctx;
20 typedef void (*ipsec_worker_fn_t)(void);
/*
 * Classify a packet as plain/IPsec IPv4/IPv6 from the HW-parsed mbuf
 * packet_type, and set *nlp to the address of the next-protocol field
 * inside the IP header (ip_p / ip6_nxt) so a later SPD (ACL) lookup can
 * start from it. Returns PKT_TYPE_INVALID for anything else.
 */
22 static inline enum pkt_type
23 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
25 struct rte_ether_hdr *eth;
26 uint32_t ptype = pkt->packet_type;
28 eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
31 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
32 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
33 offsetof(struct ip, ip_p));
/* ESP tunnel bit in the parsed ptype marks the packet as IPsec */
34 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
35 return PKT_TYPE_IPSEC_IPV4;
37 return PKT_TYPE_PLAIN_IPV4;
38 } else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
39 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
40 offsetof(struct ip6_hdr, ip6_nxt));
41 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
42 return PKT_TYPE_IPSEC_IPV6;
44 return PKT_TYPE_PLAIN_IPV6;
47 /* Unknown/Unsupported type */
48 return PKT_TYPE_INVALID;
/*
 * Rewrite the Ethernet source/destination MAC addresses of the packet
 * with the addresses configured for the egress port in ethaddr_tbl[].
 * Fix: the "&eth..." token sequences had been mangled into the decoded
 * HTML entity character 'ð' ("&eth;"), e.g. "ðhdr->src_addr";
 * restored to "&ethhdr" / "&ethaddr_tbl" so the code compiles again.
 */
52 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
54 struct rte_ether_hdr *ethhdr;
56 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
57 memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
58 memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
/*
 * Prepare an mbuf for forwarding via the event eth Tx adapter: store the
 * destination port in the mbuf and pin eth Tx queue 0 for the adapter.
 */
62 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
64 /* Save the destination port in the mbuf */
67 /* Save eth queue for Tx */
68 rte_event_eth_tx_adapter_txq_set(m, 0);
72 ev_vector_attr_init(struct rte_event_vector *vec)
/*
 * Track whether all packets of an event vector share one egress port.
 * The first packet's port is adopted; a later packet with a different
 * port invalidates the per-vector attribute.
 */
80 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
/* 0xFFFF acts as the "no port recorded yet" sentinel */
82 if (vec->port == 0xFFFF) {
83 vec->port = pkt->port;
86 if (vec->attr_valid && (vec->port != pkt->port))
/*
 * Build the per-port inline session table used by the outbound driver
 * mode worker. Walk all outbound SAs, require the primary session to be
 * INLINE_PROTOCOL and its portid to fit the table, and record the first
 * matching session/context per port in data[].
 */
91 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
92 struct port_drv_mode_data *data,
95 struct rte_ipsec_session *pri_sess;
102 for (i = 0; i < sa_out->nb_sa; i++) {
108 pri_sess = ipsec_get_primary_session(sa);
/* Driver mode supports only inline-protocol offload sessions */
112 if (pri_sess->type !=
113 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
115 RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
120 if (sa->portid >= size) {
122 "Port id >= than table size %d, %d\n",
127 /* Use only first inline session found for a given port */
128 if (data[sa->portid].sess)
130 data[sa->portid].sess = pri_sess->security.ses;
131 data[sa->portid].ctx = pri_sess->security.ctx;
/*
 * Single-packet SPD lookup: classify nlp (pointer to the IP next-proto
 * field) against the ACL-based SP context. DISCARD and a missing SP
 * context are drop cases; BYPASS/match results flow into *sa_idx
 * (assignment lines not visible here).
 */
136 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
140 if (unlikely(sp == NULL))
143 rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
144 DEFAULT_MAX_CATEGORIES);
146 if (unlikely(res == DISCARD))
148 else if (res == BYPASS) {
/*
 * Bulk SPD lookup for outbound traffic: classify every packet in 'ip';
 * DISCARD results are dropped, BYPASS packets stay plain, and matched
 * packets move to the 'ipsec' traffic type carrying SA index (res - 1).
 */
158 check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
159 struct traffic_type *ipsec)
164 if (unlikely(sp == NULL || ip->num == 0))
167 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
168 DEFAULT_MAX_CATEGORIES);
171 for (i = 0; i < ip->num; i++) {
174 if (unlikely(res == DISCARD))
176 else if (res == BYPASS)
/* ACL result is 1-based; store the 0-based SA index with the packet */
179 ipsec->res[ipsec->num] = res - 1;
180 ipsec->pkts[ipsec->num++] = m;
/*
 * Bulk SPD + SA consistency check for inbound, inline-decrypted traffic:
 * classify each packet against the SP, then verify that the SA pointer
 * the inline device stored in the security dynfield agrees (by SPI) with
 * the SA the SPD selected.
 */
187 check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
188 struct traffic_type *ip)
194 if (unlikely(sp == NULL || ip->num == 0))
197 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
198 DEFAULT_MAX_CATEGORIES);
201 for (i = 0; i < ip->num; i++) {
204 if (unlikely(res == DISCARD))
206 else if (res == BYPASS)
/* SA pointer placed in the mbuf dynfield by the inline RX path */
209 sa = *(struct ipsec_sa **)rte_security_dynfield(m);
215 /* SPI on the packet should match with the one in SA */
216 if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
/*
 * IPv4 route lookup: read the destination address from the packet,
 * convert to host byte order, and query the LPM table. Returns the
 * egress port (hop) on success, RTE_MAX_ETHPORTS when no route exists.
 */
227 static inline uint16_t
228 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
235 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
236 dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
237 dst_ip = rte_be_to_cpu_32(dst_ip);
239 ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
/* RTE_MAX_ETHPORTS doubles as the "no route" sentinel for callers */
247 return RTE_MAX_ETHPORTS;
250 /* TODO: To be tested */
/*
 * IPv6 route lookup: copy the 16-byte destination address out of the
 * packet and query the LPM6 table. Returns the egress port (hop) on
 * success, RTE_MAX_ETHPORTS when no route exists.
 */
251 static inline uint16_t
252 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
260 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
261 ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
/* memcpy: the address in the mbuf may not be suitably aligned */
262 memcpy(&dst_ip[0], ip6_dst, 16);
264 ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
272 return RTE_MAX_ETHPORTS;
/*
 * Dispatch a route lookup to the v4 or v6 table based on packet type.
 * Returns the egress port, or RTE_MAX_ETHPORTS for unroutable/invalid
 * packet types.
 */
275 static inline uint16_t
276 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
278 if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
279 return route4_pkt(pkt, rt->rt4_ctx);
280 else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
281 return route6_pkt(pkt, rt->rt6_ctx);
283 return RTE_MAX_ETHPORTS;
/*
 * App-mode handler for one inbound event. Classifies the packet, runs
 * the inbound SPD check, validates that an inline-decrypted packet
 * carries an SA whose SPI matches the SPD verdict, routes it, and
 * prepares the event for Tx. Returns PKT_FORWARDED on success; on any
 * failure the packet is freed (drop_pkt_and_exit path).
 */
287 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
288 struct rte_event *ev)
290 struct ipsec_sa *sa = NULL;
291 struct rte_mbuf *pkt;
292 uint16_t port_id = 0;
297 /* Get pkt from event */
300 /* Check the packet type */
301 type = process_ipsec_get_pkt_type(pkt, &nlp);
304 case PKT_TYPE_PLAIN_IPV4:
305 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
306 if (unlikely(pkt->ol_flags &
307 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
309 "Inbound security offload failed\n");
310 goto drop_pkt_and_exit;
/* SA pointer stored in the mbuf dynfield by the inline RX path */
312 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
315 /* Check if we have a match */
316 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
318 goto drop_pkt_and_exit;
322 case PKT_TYPE_PLAIN_IPV6:
323 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
324 if (unlikely(pkt->ol_flags &
325 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
327 "Inbound security offload failed\n");
328 goto drop_pkt_and_exit;
330 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
333 /* Check if we have a match */
334 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
336 goto drop_pkt_and_exit;
341 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
343 goto drop_pkt_and_exit;
346 /* Check if the packet has to be bypassed */
347 if (sa_idx == BYPASS)
348 goto route_and_send_pkt;
350 /* Validate sa_idx */
351 if (sa_idx >= ctx->sa_ctx->nb_sa)
352 goto drop_pkt_and_exit;
354 /* Else the packet has to be protected with SA */
356 /* If the packet was IPsec processed, then SA pointer should be set */
358 goto drop_pkt_and_exit;
360 /* SPI on the packet should match with the one in SA */
361 if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
362 goto drop_pkt_and_exit;
365 port_id = get_route(pkt, rt, type);
366 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
368 goto drop_pkt_and_exit;
370 /* else, we have a matching route */
372 /* Update mac addresses */
373 update_mac_addrs(pkt, port_id);
375 /* Update the event with the dest port */
376 ipsec_event_pre_forward(pkt, port_id);
377 return PKT_FORWARDED;
380 RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
381 rte_pktmbuf_free(pkt);
/*
 * App-mode handler for one outbound event. Only plain IPv4/IPv6 is
 * accepted on protected ports. The outbound SPD decides BYPASS (route by
 * destination), DISCARD, or protect-with-SA; protected packets get the
 * inline-protocol session metadata attached and are sent out via the
 * SA's port. Returns PKT_FORWARDED on success; drops (frees) otherwise.
 */
387 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
388 struct rte_event *ev)
390 struct rte_ipsec_session *sess;
391 struct sa_ctx *sa_ctx;
392 struct rte_mbuf *pkt;
393 uint16_t port_id = 0;
399 /* Get pkt from event */
402 /* Check the packet type */
403 type = process_ipsec_get_pkt_type(pkt, &nlp);
406 case PKT_TYPE_PLAIN_IPV4:
407 /* Check if we have a match */
408 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
410 goto drop_pkt_and_exit;
413 case PKT_TYPE_PLAIN_IPV6:
414 /* Check if we have a match */
415 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
417 goto drop_pkt_and_exit;
422 * Only plain IPv4 & IPv6 packets are allowed
423 * on protected port. Drop the rest.
425 RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
426 goto drop_pkt_and_exit;
429 /* Check if the packet has to be bypassed */
430 if (sa_idx == BYPASS) {
431 port_id = get_route(pkt, rt, type);
432 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
434 goto drop_pkt_and_exit;
436 /* else, we have a matching route */
440 /* Validate sa_idx */
441 if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
442 goto drop_pkt_and_exit;
444 /* Else the packet has to be protected */
447 sa_ctx = ctx->sa_ctx;
450 sa = &(sa_ctx->sa[sa_idx]);
452 /* Get IPsec session */
453 sess = ipsec_get_primary_session(sa);
455 /* Allow only inline protocol for now */
456 if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
457 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
458 goto drop_pkt_and_exit;
/* Attach the session so the inline device encrypts on Tx */
461 rte_security_set_pkt_metadata(sess->security.ctx,
462 sess->security.ses, pkt, NULL);
464 /* Mark the packet for Tx security offload */
465 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
467 /* Get the port to which this pkt need to be submitted */
468 port_id = sa->portid;
471 /* Provide L2 len for Outbound processing */
472 pkt->l2_len = RTE_ETHER_HDR_LEN;
474 /* Update mac addresses */
475 update_mac_addrs(pkt, port_id);
477 /* Update the event with the dest port */
478 ipsec_event_pre_forward(pkt, port_id);
479 return PKT_FORWARDED;
482 RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
483 rte_pktmbuf_free(pkt);
/*
 * Route the classified traffic of one event vector and compact the
 * surviving packets back into vec->mbufs. Plain IPv4/IPv6 packets are
 * routed by destination; SPD-matched (ipsec) packets get their
 * inline-protocol session metadata attached and go out via the SA's
 * port. j counts kept packets (presumably the return value — the tail
 * of the function is not visible here).
 */
489 ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
490 struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
492 struct rte_ipsec_session *sess;
493 uint32_t sa_idx, i, j = 0;
494 uint16_t port_id = 0;
495 struct rte_mbuf *pkt;
498 /* Route IPv4 packets */
499 for (i = 0; i < t->ip4.num; i++) {
500 pkt = t->ip4.pkts[i];
501 port_id = route4_pkt(pkt, rt->rt4_ctx);
502 if (port_id != RTE_MAX_ETHPORTS) {
503 /* Update mac addresses */
504 update_mac_addrs(pkt, port_id);
505 /* Update the event with the dest port */
506 ipsec_event_pre_forward(pkt, port_id);
507 ev_vector_attr_update(vec, pkt);
508 vec->mbufs[j++] = pkt;
513 /* Route IPv6 packets */
514 for (i = 0; i < t->ip6.num; i++) {
515 pkt = t->ip6.pkts[i];
516 port_id = route6_pkt(pkt, rt->rt6_ctx);
517 if (port_id != RTE_MAX_ETHPORTS) {
518 /* Update mac addresses */
519 update_mac_addrs(pkt, port_id);
520 /* Update the event with the dest port */
521 ipsec_event_pre_forward(pkt, port_id);
522 ev_vector_attr_update(vec, pkt);
523 vec->mbufs[j++] = pkt;
528 /* Route ESP packets */
529 for (i = 0; i < t->ipsec.num; i++) {
530 /* Validate sa_idx */
531 sa_idx = t->ipsec.res[i];
532 pkt = t->ipsec.pkts[i];
533 if (unlikely(sa_idx >= sa_ctx->nb_sa))
536 /* Else the packet has to be protected */
537 sa = &(sa_ctx->sa[sa_idx]);
538 /* Get IPsec session */
539 sess = ipsec_get_primary_session(sa);
540 /* Allow only inline protocol for now */
541 if (unlikely(sess->type !=
542 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
543 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
/* Attach the session so the inline device encrypts on Tx */
547 rte_security_set_pkt_metadata(sess->security.ctx,
548 sess->security.ses, pkt, NULL);
550 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
551 port_id = sa->portid;
552 update_mac_addrs(pkt, port_id);
553 ipsec_event_pre_forward(pkt, port_id);
554 ev_vector_attr_update(vec, pkt);
555 vec->mbufs[j++] = pkt;
/*
 * Sort one packet into the per-type buckets of an ipsec_traffic struct
 * (plain IPv4 or IPv6), recording the next-proto pointer for the later
 * bulk SPD lookup. Unsupported types are only logged at debug level.
 */
563 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
568 /* Check the packet type */
569 type = process_ipsec_get_pkt_type(pkt, &nlp);
572 case PKT_TYPE_PLAIN_IPV4:
573 t->ip4.data[t->ip4.num] = nlp;
574 t->ip4.pkts[(t->ip4.num)++] = pkt;
576 case PKT_TYPE_PLAIN_IPV6:
577 t->ip6.data[t->ip6.num] = nlp;
578 t->ip6.pkts[(t->ip6.num)++] = pkt;
581 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
/*
 * App-mode handler for one inbound event vector: drop packets whose
 * inline decryption failed, classify the rest, run bulk SPD+SA checks
 * per address family, then route and repack the vector.
 */
589 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
590 struct rte_event_vector *vec)
592 struct ipsec_traffic t;
593 struct rte_mbuf *pkt;
600 for (i = 0; i < vec->nb_elem; i++) {
601 /* Get pkt from event */
604 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
605 if (unlikely(pkt->ol_flags &
606 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
608 "Inbound security offload failed\n");
614 classify_pkt(pkt, &t);
617 check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
618 check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
620 return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
/*
 * App-mode handler for one outbound event vector: classify all packets,
 * set l2_len for outbound processing, run the bulk SPD lookup per
 * address family (matches land in t.ipsec), then route and repack.
 */
624 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
625 struct rte_event_vector *vec)
627 struct ipsec_traffic t;
628 struct rte_mbuf *pkt;
635 for (i = 0; i < vec->nb_elem; i++) {
636 /* Get pkt from event */
639 classify_pkt(pkt, &t);
641 /* Provide L2 len for Outbound processing */
642 pkt->l2_len = RTE_ETHER_HDR_LEN;
645 check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
646 check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
648 return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
/*
 * Driver-mode handler for an outbound event vector: for each packet,
 * attach the per-port inline session from data[], mark Tx security
 * offload, set l2_len, and compact survivors (packets whose port has no
 * session are skipped). Returns the new element count (j; return line
 * not visible here).
 */
652 process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
653 struct port_drv_mode_data *data)
655 struct rte_mbuf *pkt;
660 for (i = 0; i < vec->nb_elem; i++) {
664 if (unlikely(!data[port_id].sess)) {
668 ipsec_event_pre_forward(pkt, port_id);
669 /* Save security session */
670 rte_security_set_pkt_metadata(data[port_id].ctx,
671 data[port_id].sess, pkt,
674 /* Mark the packet for Tx security offload */
675 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
677 /* Provide L2 len for Outbound processing */
678 pkt->l2_len = RTE_ETHER_HDR_LEN;
680 vec->mbufs[j++] = pkt;
/*
 * App-mode dispatch for a vector event: choose inbound or outbound
 * processing based on the (first) packet's port, enqueue survivors to
 * the event eth Tx adapter, or return the emptied vector to its
 * mempool when nothing is left to send.
 */
687 ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
688 struct eh_event_link_info *links,
689 struct rte_event *ev)
691 struct rte_event_vector *vec = ev->vec;
692 struct rte_mbuf *pkt;
697 ev_vector_attr_init(vec);
698 if (is_unprotected_port(pkt->port))
699 ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
702 ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
705 if (likely(ret > 0)) {
707 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
708 links[0].event_port_id,
/* All packets dropped: recycle the empty vector */
711 rte_mempool_put(rte_mempool_from_obj(vec), vec);
/*
 * Driver-mode dispatch for a vector event: outbound vectors on
 * protected ports get per-port inline sessions attached; non-empty
 * vectors are enqueued to the Tx adapter, empty ones are returned to
 * their mempool.
 */
716 ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
717 struct rte_event *ev,
718 struct port_drv_mode_data *data)
720 struct rte_event_vector *vec = ev->vec;
721 struct rte_mbuf *pkt;
725 if (!is_unprotected_port(pkt->port))
726 vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
728 if (vec->nb_elem > 0)
729 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
730 links[0].event_port_id,
733 rte_mempool_put(rte_mempool_from_obj(vec), vec);
737 * Event mode exposes various operating modes depending on the
738 * capabilities of the event device and the operating mode
/* Callback for rte_event_port_quiesce(): free the mbuf of each event
 * still buffered on the port when a worker shuts down.
 */
743 ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
744 void *args __rte_unused)
746 rte_pktmbuf_free(ev.mbuf);
749 /* Workers registered */
/* One driver-mode + one app-mode worker, populated below */
750 #define IPSEC_EVENTMODE_WORKERS 2
754 * Operating parameters : non-burst - Tx internal port - driver mode
/*
 * Driver-mode worker loop (non-burst, Tx internal port): dequeue one
 * event at a time, attach the per-port inline session to outbound
 * packets, and enqueue them directly to the event eth Tx adapter.
 * Runs until force_quit, then quiesces the event port, freeing any
 * still-buffered events via ipsec_event_port_flush().
 */
757 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
760 struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
761 unsigned int nb_rx = 0, nb_tx;
762 struct rte_mbuf *pkt;
768 /* Check if we have links registered for this lcore */
770 /* No links registered - exit */
/* Fix: zero the WHOLE per-port table, not just its first element.
 * data[port_id].sess is read below for any protected port, so entries
 * beyond index 0 must start out NULL; the previous
 * sizeof(struct port_drv_mode_data) cleared only one entry and left
 * the rest uninitialized.
 */
774 memset(data, 0, sizeof(data));
777 lcore_id = rte_lcore_id();
780 socket_id = rte_lcore_to_socket_id(lcore_id);
783 * Prepare security sessions table. In outbound driver mode
784 * we always use first session configured for a given port
786 prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
790 "Launching event mode worker (non-burst - Tx internal port - "
791 "driver mode) on lcore %d\n", lcore_id);
793 /* We have valid links */
795 /* Check if it's single link */
798 "Multiple links not supported. Using first link\n");
801 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
802 links[0].event_port_id);
803 while (!force_quit) {
804 /* Read packet from event queues */
805 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
806 links[0].event_port_id,
809 0 /* timeout_ticks */);
814 switch (ev.event_type) {
815 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
816 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
817 ipsec_ev_vector_drv_mode_process(links, &ev, data);
819 case RTE_EVENT_TYPE_ETHDEV:
822 RTE_LOG(ERR, IPSEC, "Invalid event type %u",
830 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
833 ipsec_event_pre_forward(pkt, port_id);
835 if (!is_unprotected_port(port_id)) {
/* Protected port with no inline session: cannot encrypt, drop */
837 if (unlikely(!data[port_id].sess)) {
838 rte_pktmbuf_free(pkt);
842 /* Save security session */
843 rte_security_set_pkt_metadata(data[port_id].ctx,
844 data[port_id].sess, pkt,
847 /* Mark the packet for Tx security offload */
848 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
850 /* Provide L2 len for Outbound processing */
851 pkt->l2_len = RTE_ETHER_HDR_LEN;
855 * Since tx internal port is available, events can be
856 * directly enqueued to the adapter and it would be
857 * internally submitted to the eth device.
859 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
860 links[0].event_port_id,
865 rte_pktmbuf_free(ev.mbuf);
869 ev.op = RTE_EVENT_OP_RELEASE;
870 rte_event_enqueue_burst(links[0].eventdev_id,
871 links[0].event_port_id, &ev, 1);
874 rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
875 ipsec_event_port_flush, NULL);
880 * Operating parameters : non-burst - Tx internal port - app mode
/*
 * App-mode worker loop (non-burst, Tx internal port): build the per-
 * lcore inbound/outbound contexts from the socket config, then dequeue
 * events one at a time, run full SPD/SA/routing processing
 * (process_ipsec_ev_{in,out}bound[_vector]) and enqueue forwarded
 * packets to the event eth Tx adapter. Runs until force_quit, then
 * quiesces the event port.
 */
883 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
886 struct lcore_conf_ev_tx_int_port_wrkr lconf;
887 unsigned int nb_rx = 0, nb_tx;
893 /* Check if we have links registered for this lcore */
895 /* No links registered - exit */
899 /* We have valid links */
902 lcore_id = rte_lcore_id();
905 socket_id = rte_lcore_to_socket_id(lcore_id);
907 /* Save routing table */
908 lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
909 lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
910 lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
911 lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
912 lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
913 lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
914 lconf.inbound.session_priv_pool =
915 socket_ctx[socket_id].session_priv_pool;
916 lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
917 lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
918 lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
919 lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
920 lconf.outbound.session_priv_pool =
921 socket_ctx[socket_id].session_priv_pool;
924 "Launching event mode worker (non-burst - Tx internal port - "
925 "app mode) on lcore %d\n", lcore_id);
927 /* Check if it's single link */
930 "Multiple links not supported. Using first link\n");
933 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
934 links[0].event_port_id);
936 while (!force_quit) {
937 /* Read packet from event queues */
938 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
939 links[0].event_port_id,
942 0 /* timeout_ticks */);
947 switch (ev.event_type) {
948 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
949 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
950 ipsec_ev_vector_process(&lconf, links, &ev);
952 case RTE_EVENT_TYPE_ETHDEV:
955 RTE_LOG(ERR, IPSEC, "Invalid event type %u",
/* Unprotected port => inbound (decrypt side), else outbound */
960 if (is_unprotected_port(ev.mbuf->port))
961 ret = process_ipsec_ev_inbound(&lconf.inbound,
964 ret = process_ipsec_ev_outbound(&lconf.outbound,
967 /* The pkt has been dropped */
971 * Since tx internal port is available, events can be
972 * directly enqueued to the adapter and it would be
973 * internally submitted to the eth device.
975 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
976 links[0].event_port_id,
981 rte_pktmbuf_free(ev.mbuf);
985 ev.op = RTE_EVENT_OP_RELEASE;
986 rte_event_enqueue_burst(links[0].eventdev_id,
987 links[0].event_port_id, &ev, 1);
990 rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
991 ipsec_event_port_flush, NULL);
/*
 * Register the supported event-mode worker variants (capabilities +
 * thread function) into wrkrs[]. Returns the number of entries filled
 * (== IPSEC_EVENTMODE_WORKERS).
 */
995 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
997 struct eh_app_worker_params *wrkr;
998 uint8_t nb_wrkr_param = 0;
1003 /* Non-burst - Tx internal port - driver mode */
1004 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1005 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1006 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1007 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
1011 /* Non-burst - Tx internal port - app mode */
1012 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1013 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1014 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1015 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
1018 return nb_wrkr_param;
/*
 * Event-mode entry point for a worker lcore: populate the worker
 * parameter table and let the event helper pick and launch the variant
 * matching the event device's capabilities.
 */
1022 ipsec_eventmode_worker(struct eh_conf *conf)
1024 struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
1026 uint8_t nb_wrkr_param;
1028 /* Populate l2fwd_wrkr params */
1029 nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
1032 * Launch correct worker after checking
1033 * the event device's capabilities.
1035 eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
/*
 * Outbound SPD processing for the inline-protocol poll-mode worker.
 * Classify the 'ip' burst against the SPD, then batch consecutive
 * packets that hit the same SA and run rte_ipsec_pkt_process() per
 * group; processed packets are appended to 'match' or 'mismatch'
 * depending on whether the SA's address family equals match_flag.
 * DISCARD hits are dropped, BYPASS hits go straight to 'match'.
 */
1038 static __rte_always_inline void
1039 outb_inl_pro_spd_process(struct sp_ctx *sp,
1040 struct sa_ctx *sa_ctx,
1041 struct traffic_type *ip,
1042 struct traffic_type *match,
1043 struct traffic_type *mismatch,
1045 struct ipsec_spd_stats *stats)
1047 uint32_t prev_sa_idx = UINT32_MAX;
1048 struct rte_mbuf *ipsec[MAX_PKT_BURST];
1049 struct rte_ipsec_session *ips;
1050 uint32_t i, j, j_mis, sa_idx;
1051 struct ipsec_sa *sa = NULL;
1052 uint32_t ipsec_num = 0;
1056 if (ip->num == 0 || sp == NULL)
1059 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
1060 ip->num, DEFAULT_MAX_CATEGORIES);
1063 j_mis = mismatch->num;
1065 for (i = 0; i < ip->num; i++) {
/* ACL result is 1-based; convert to a 0-based SA index */
1067 sa_idx = ip->res[i] - 1;
1069 if (unlikely(ip->res[i] == DISCARD)) {
1073 } else if (unlikely(ip->res[i] == BYPASS)) {
1074 match->pkts[j++] = m;
/* First SA seen: start a new processing group */
1078 if (prev_sa_idx == UINT32_MAX) {
1079 prev_sa_idx = sa_idx;
1080 sa = &sa_ctx->sa[sa_idx];
1081 ips = ipsec_get_primary_session(sa);
1082 satp = rte_ipsec_sa_type(ips->sa);
/* SA changed: flush the accumulated group before switching */
1085 if (sa_idx != prev_sa_idx) {
1086 prep_process_group(sa, ipsec, ipsec_num);
1088 /* Prepare packets for outbound */
1089 rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1091 /* Copy to current tr or a different tr */
1092 if (SATP_OUT_IPV4(satp) == match_flag) {
1093 memcpy(&match->pkts[j], ipsec,
1094 ipsec_num * sizeof(void *));
1097 memcpy(&mismatch->pkts[j_mis], ipsec,
1098 ipsec_num * sizeof(void *));
1102 /* Update to new SA */
1103 sa = &sa_ctx->sa[sa_idx];
1104 ips = ipsec_get_primary_session(sa);
1105 satp = rte_ipsec_sa_type(ips->sa);
1109 ipsec[ipsec_num++] = m;
/* Flush the trailing group after the loop */
1115 prep_process_group(sa, ipsec, ipsec_num);
1117 /* Prepare packets for outbound */
1118 rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1120 /* Copy to current tr or a different tr */
1121 if (SATP_OUT_IPV4(satp) == match_flag) {
1122 memcpy(&match->pkts[j], ipsec,
1123 ipsec_num * sizeof(void *));
1126 memcpy(&mismatch->pkts[j_mis], ipsec,
1127 ipsec_num * sizeof(void *));
1132 mismatch->num = j_mis;
1135 /* Poll mode worker when all SA's are of type inline protocol */
/*
 * Main loop: per RX queue, receive a burst, drop raw IPsec traffic
 * (inline HW already decrypts), run inbound SPD/SA checks on
 * unprotected ports or the outbound SPD grouping
 * (outb_inl_pro_spd_process) on protected ones, then route the
 * resulting v4/v6 sets. TX buffers are drained on a TSC-based timer.
 */
1137 ipsec_poll_mode_wrkr_inl_pr(void)
1139 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1140 / US_PER_S * BURST_TX_DRAIN_US;
1141 struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
1142 struct rte_mbuf *pkts[MAX_PKT_BURST];
1143 uint64_t prev_tsc, diff_tsc, cur_tsc;
1144 struct ipsec_core_statistics *stats;
1145 struct rt_ctx *rt4_ctx, *rt6_ctx;
1146 struct sa_ctx *sa_in, *sa_out;
1147 struct traffic_type ip4, ip6;
1148 struct lcore_rx_queue *rxql;
1149 struct rte_mbuf **v4, **v6;
1150 struct ipsec_traffic trf;
1151 struct lcore_conf *qconf;
1152 uint16_t v4_num, v6_num;
1160 lcore_id = rte_lcore_id();
1161 qconf = &lcore_conf[lcore_id];
1162 rxql = qconf->rx_queue_list;
1163 socket_id = rte_lcore_to_socket_id(lcore_id);
1164 stats = &core_statistics[lcore_id];
1166 rt4_ctx = socket_ctx[socket_id].rt_ip4;
1167 rt6_ctx = socket_ctx[socket_id].rt_ip6;
1169 sp4_in = socket_ctx[socket_id].sp_ip4_in;
1170 sp6_in = socket_ctx[socket_id].sp_ip6_in;
1171 sa_in = socket_ctx[socket_id].sa_in;
1173 sp4_out = socket_ctx[socket_id].sp_ip4_out;
1174 sp6_out = socket_ctx[socket_id].sp_ip6_out;
1175 sa_out = socket_ctx[socket_id].sa_out;
1177 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1179 if (qconf->nb_rx_queue == 0) {
1180 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1185 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1187 for (i = 0; i < qconf->nb_rx_queue; i++) {
1188 portid = rxql[i].port_id;
1189 queueid = rxql[i].queue_id;
1190 RTE_LOG(INFO, IPSEC,
1191 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1192 lcore_id, portid, queueid);
1195 while (!force_quit) {
1196 cur_tsc = rte_rdtsc();
1198 /* TX queue buffer drain */
1199 diff_tsc = cur_tsc - prev_tsc;
1201 if (unlikely(diff_tsc > drain_tsc)) {
1202 drain_tx_buffers(qconf);
1206 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1207 /* Read packets from RX queues */
1208 portid = rxql[i].port_id;
1209 queueid = rxql[i].queue_id;
1210 nb_rx = rte_eth_rx_burst(portid, queueid,
1211 pkts, MAX_PKT_BURST);
1216 core_stats_update_rx(nb_rx);
1218 prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
1220 /* Drop any IPsec traffic */
1221 free_pkts(trf.ipsec.pkts, trf.ipsec.num);
1223 if (is_unprotected_port(portid)) {
1224 inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
1226 &stats->inbound.spd4);
1228 inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
1230 &stats->inbound.spd6);
1233 v4_num = trf.ip4.num;
1235 v6_num = trf.ip6.num;
/* Outbound: group/encrypt per SA; v4 hits may emit v6 (tunnel) and
 * vice versa, hence the crossed match/mismatch targets.
 */
1240 outb_inl_pro_spd_process(sp4_out, sa_out,
1241 &trf.ip4, &ip4, &ip6,
1243 &stats->outbound.spd4);
1245 outb_inl_pro_spd_process(sp6_out, sa_out,
1246 &trf.ip6, &ip6, &ip4,
1248 &stats->outbound.spd6);
1255 route4_pkts(rt4_ctx, v4, v4_num, 0, false);
1256 route6_pkts(rt6_ctx, v6, v6_num);
1261 /* Poll mode worker when all SA's are of type inline protocol
1262 * and single sa mode is enabled.
/*
 * Single-SA fast path: inbound (unprotected port) packets are forwarded
 * as-is — inline HW already decrypted them; outbound packets are all
 * processed through the one configured SA (single_sa_idx) and sent out
 * via that SA's port. No SPD lookup is performed at all.
 */
1265 ipsec_poll_mode_wrkr_inl_pr_ss(void)
1267 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1268 / US_PER_S * BURST_TX_DRAIN_US;
1269 uint16_t sa_out_portid = 0, sa_out_proto = 0;
1270 struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
1271 uint64_t prev_tsc, diff_tsc, cur_tsc;
1272 struct rte_ipsec_session *ips = NULL;
1273 struct lcore_rx_queue *rxql;
1274 struct ipsec_sa *sa = NULL;
1275 struct lcore_conf *qconf;
1276 struct sa_ctx *sa_out;
1277 uint32_t i, nb_rx, j;
1284 lcore_id = rte_lcore_id();
1285 qconf = &lcore_conf[lcore_id];
1286 rxql = qconf->rx_queue_list;
1287 socket_id = rte_lcore_to_socket_id(lcore_id);
/* Resolve the single outbound SA, its session, port and outer proto */
1290 sa_out = socket_ctx[socket_id].sa_out;
1291 if (sa_out && single_sa_idx < sa_out->nb_sa) {
1292 sa = &sa_out->sa[single_sa_idx];
1293 ips = ipsec_get_primary_session(sa);
1294 sa_out_portid = sa->portid;
1295 if (sa->flags & IP6_TUNNEL)
1296 sa_out_proto = IPPROTO_IPV6;
1298 sa_out_proto = IPPROTO_IP;
1301 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1303 if (qconf->nb_rx_queue == 0) {
1304 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1309 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1311 for (i = 0; i < qconf->nb_rx_queue; i++) {
1312 portid = rxql[i].port_id;
1313 queueid = rxql[i].queue_id;
1314 RTE_LOG(INFO, IPSEC,
1315 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1316 lcore_id, portid, queueid);
1319 while (!force_quit) {
1320 cur_tsc = rte_rdtsc();
1322 /* TX queue buffer drain */
1323 diff_tsc = cur_tsc - prev_tsc;
1325 if (unlikely(diff_tsc > drain_tsc)) {
1326 drain_tx_buffers(qconf);
1330 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1331 /* Read packets from RX queues */
1332 portid = rxql[i].port_id;
1333 queueid = rxql[i].queue_id;
1334 nb_rx = rte_eth_rx_burst(portid, queueid,
1335 pkts, MAX_PKT_BURST);
1340 core_stats_update_rx(nb_rx);
1342 if (is_unprotected_port(portid)) {
1343 /* Nothing much to do for inbound inline
1344 * decrypted traffic.
1346 for (j = 0; j < nb_rx; j++) {
1347 uint32_t ptype, proto;
1350 ptype = pkt->packet_type &
1352 if (ptype == RTE_PTYPE_L3_IPV4)
1355 proto = IPPROTO_IPV6;
1357 send_single_packet(pkt, portid, proto);
1363 /* Free packets if there are no outbound sessions */
1364 if (unlikely(!ips)) {
1365 rte_pktmbuf_free_bulk(pkts, nb_rx);
/* One call handles the whole burst: all packets share the SA */
1369 rte_ipsec_pkt_process(ips, pkts, nb_rx);
1372 for (j = 0; j < nb_rx; j++) {
1375 pkt->l2_len = RTE_ETHER_HDR_LEN;
1376 send_single_packet(pkt, sa_out_portid,
/*
 * Pick the poll-mode worker variant from the wrkr_flags capability mask
 * (inline-protocol, optional single-SA) and fall back to the generic
 * ipsec_poll_mode_worker when legacy config is used or no specialized
 * variant matches.
 */
1384 ipsec_poll_mode_wrkr_launch(void)
1386 static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
1387 [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
1388 [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
1390 ipsec_worker_fn_t fn;
1392 if (!app_sa_prm.enable) {
1393 fn = ipsec_poll_mode_worker;
1395 fn = poll_mode_wrkrs[wrkr_flags];
1397 /* Always default to all mode worker */
1399 fn = ipsec_poll_mode_worker;
1406 int ipsec_launch_one_lcore(void *args)
1408 struct eh_conf *conf;
1410 conf = (struct eh_conf *)args;
1412 if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
1413 /* Run in poll mode */
1414 ipsec_poll_mode_wrkr_launch();
1415 } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
1416 /* Run in event mode */
1417 ipsec_eventmode_worker(conf);