1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright (C) 2020 Marvell International Ltd.
6 #include <rte_event_eth_tx_adapter.h>
10 #include "event_helper.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
15 struct port_drv_mode_data {
16 struct rte_security_session *sess;
17 struct rte_security_ctx *ctx;
20 typedef void (*ipsec_worker_fn_t)(void);
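/*
 * Classify a packet using the mbuf packet type set at Rx: plain or ESP,
 * IPv4 or IPv6. On success *nlp points at the L3 next-protocol field so
 * it can be fed directly to the SP (ACL) classification.
 */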
22 static inline enum pkt_type
23 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
25 struct rte_ether_hdr *eth;
26 uint32_t ptype = pkt->packet_type;
28 eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
31 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
32 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
33 offsetof(struct ip, ip_p));
34 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
35 return PKT_TYPE_IPSEC_IPV4;
37 return PKT_TYPE_PLAIN_IPV4;
38 } else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
39 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
40 offsetof(struct ip6_hdr, ip6_nxt));
41 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
42 return PKT_TYPE_IPSEC_IPV6;
44 return PKT_TYPE_PLAIN_IPV6;
47 /* Unknown/Unsupported type */
48 return PKT_TYPE_INVALID;
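/*
 * Rewrite the Ethernet header with the source/destination MAC addresses
 * configured in ethaddr_tbl[] for the egress port.
 */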
52 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
54 struct rte_ether_hdr *ethhdr;
56 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
57 memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
58 memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
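/*
 * Prepare an mbuf for Tx through the event eth Tx adapter: record the
 * destination port in the mbuf and use Tx queue 0.
 */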
62 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
64 /* Save the destination port in the mbuf */
67 /* Save eth queue for Tx */
68 rte_event_eth_tx_adapter_txq_set(m, 0);
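/*
 * Event vector attribute helpers: the vector-level port attribute is
 * initialized lazily from the first mbuf and stays valid only while
 * every mbuf in the vector comes from the same port.
 */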
72 ev_vector_attr_init(struct rte_event_vector *vec)
80 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
82 if (vec->port == 0xFFFF) {
83 vec->port = pkt->port;
86 if (vec->attr_valid && (vec->port != pkt->port))
91 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
92 struct port_drv_mode_data *data,
95 struct rte_ipsec_session *pri_sess;
102 for (i = 0; i < sa_out->nb_sa; i++) {
108 pri_sess = ipsec_get_primary_session(sa);
112 if (pri_sess->type !=
113 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
115 RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
120 if (sa->portid >= size) {
122 "Port id >= than table size %d, %d\n",
127 /* Use only first inline session found for a given port */
128 if (data[sa->portid].sess)
130 data[sa->portid].sess = pri_sess->security.ses;
131 data[sa->portid].ctx = pri_sess->security.ctx;
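/*
 * SP (ACL) lookup helpers. The ACL result convention used throughout
 * this file is:
 *   res == DISCARD  -> drop the packet
 *   res == BYPASS   -> forward without IPsec processing
 *   otherwise       -> res - 1 is the index into sa_ctx->sa[]
 */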
136 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
140 if (unlikely(sp == NULL))
143 rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
144 DEFAULT_MAX_CATEGORIES);
146 if (unlikely(res == DISCARD))
148 else if (res == BYPASS) {
158 check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
159 struct traffic_type *ipsec)
164 if (unlikely(sp == NULL || ip->num == 0))
167 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
168 DEFAULT_MAX_CATEGORIES);
171 for (i = 0; i < ip->num; i++) {
174 if (unlikely(res == DISCARD))
176 else if (res == BYPASS)
179 ipsec->res[ipsec->num] = res - 1;
180 ipsec->pkts[ipsec->num++] = m;
187 check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
188 struct traffic_type *ip)
194 if (unlikely(sp == NULL || ip->num == 0))
197 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
198 DEFAULT_MAX_CATEGORIES);
201 for (i = 0; i < ip->num; i++) {
204 if (unlikely(res == DISCARD))
206 else if (res == BYPASS)
209 sa = *(struct ipsec_sa **)rte_security_dynfield(m);
215 /* The SPI in the packet should match the one in the SA */
216 if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
227 static inline uint16_t
228 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
235 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
236 dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
237 dst_ip = rte_be_to_cpu_32(dst_ip);
239 ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
247 return RTE_MAX_ETHPORTS;
250 /* TODO: To be tested */
251 static inline uint16_t
252 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
260 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
261 ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
262 memcpy(&dst_ip[0], ip6_dst, 16);
264 ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
272 return RTE_MAX_ETHPORTS;
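/*
 * Resolve the egress port for a packet: IPv4 goes through the LPM route
 * table, IPv6 through the LPM6 table. RTE_MAX_ETHPORTS is returned when
 * no route is found.
 */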
275 static inline uint16_t
276 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
278 if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
279 return route4_pkt(pkt, rt->rt4_ctx);
280 else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
281 return route6_pkt(pkt, rt->rt6_ctx);
283 return RTE_MAX_ETHPORTS;
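/*
 * App-mode inbound processing of a single event: verify the inline Rx
 * offload result, run the inbound SP (ACL) lookup, cross-check the SPI
 * against the SA resolved by hardware, then route the packet. Returns
 * PKT_FORWARDED on success; on any failure the packet is freed.
 */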
287 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
288 struct rte_event *ev)
290 struct ipsec_sa *sa = NULL;
291 struct rte_mbuf *pkt;
292 uint16_t port_id = 0;
297 /* Get pkt from event */
300 /* Check the packet type */
301 type = process_ipsec_get_pkt_type(pkt, &nlp);
304 case PKT_TYPE_PLAIN_IPV4:
305 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
306 if (unlikely(pkt->ol_flags &
307 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
309 "Inbound security offload failed\n");
310 goto drop_pkt_and_exit;
312 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
315 /* Check if we have a match */
316 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
318 goto drop_pkt_and_exit;
322 case PKT_TYPE_PLAIN_IPV6:
323 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
324 if (unlikely(pkt->ol_flags &
325 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
327 "Inbound security offload failed\n");
328 goto drop_pkt_and_exit;
330 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
333 /* Check if we have a match */
334 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
336 goto drop_pkt_and_exit;
341 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
343 goto drop_pkt_and_exit;
346 /* Check if the packet has to be bypassed */
347 if (sa_idx == BYPASS)
348 goto route_and_send_pkt;
350 /* Validate sa_idx */
351 if (sa_idx >= ctx->sa_ctx->nb_sa)
352 goto drop_pkt_and_exit;
354 /* Else the packet has to be protected with SA */
356 /* If the packet was IPsec processed, then SA pointer should be set */
358 goto drop_pkt_and_exit;
360 /* The SPI in the packet should match the one in the SA */
361 if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
362 goto drop_pkt_and_exit;
365 port_id = get_route(pkt, rt, type);
366 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
368 goto drop_pkt_and_exit;
370 /* else, we have a matching route */
372 /* Update mac addresses */
373 update_mac_addrs(pkt, port_id);
375 /* Update the event with the dest port */
376 ipsec_event_pre_forward(pkt, port_id);
377 return PKT_FORWARDED;
380 RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
381 rte_pktmbuf_free(pkt);
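/*
 * App-mode outbound processing of a single event: only plain IPv4/IPv6
 * packets are accepted, the outbound SP decides between BYPASS routing
 * and protection through an inline-protocol SA, whose session metadata
 * is attached before the packet is forwarded on the SA's port.
 */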
387 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
388 struct rte_event *ev)
390 struct rte_ipsec_session *sess;
391 struct sa_ctx *sa_ctx;
392 struct rte_mbuf *pkt;
393 uint16_t port_id = 0;
399 /* Get pkt from event */
402 /* Check the packet type */
403 type = process_ipsec_get_pkt_type(pkt, &nlp);
406 case PKT_TYPE_PLAIN_IPV4:
407 /* Check if we have a match */
408 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
410 goto drop_pkt_and_exit;
413 case PKT_TYPE_PLAIN_IPV6:
414 /* Check if we have a match */
415 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
417 goto drop_pkt_and_exit;
422 * Only plain IPv4 & IPv6 packets are allowed
423 * on protected port. Drop the rest.
425 RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
426 goto drop_pkt_and_exit;
429 /* Check if the packet has to be bypassed */
430 if (sa_idx == BYPASS) {
431 port_id = get_route(pkt, rt, type);
432 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
434 goto drop_pkt_and_exit;
436 /* else, we have a matching route */
440 /* Validate sa_idx */
441 if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
442 goto drop_pkt_and_exit;
444 /* Else the packet has to be protected */
447 sa_ctx = ctx->sa_ctx;
450 sa = &(sa_ctx->sa[sa_idx]);
452 /* Get IPsec session */
453 sess = ipsec_get_primary_session(sa);
455 /* Allow only inline protocol for now */
456 if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
457 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
458 goto drop_pkt_and_exit;
461 rte_security_set_pkt_metadata(sess->security.ctx,
462 sess->security.ses, pkt, NULL);
464 /* Mark the packet for Tx security offload */
465 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
467 /* Get the port to which this pkt needs to be submitted */
468 port_id = sa->portid;
471 /* Provide L2 len for Outbound processing */
472 pkt->l2_len = RTE_ETHER_HDR_LEN;
474 /* Update mac addresses */
475 update_mac_addrs(pkt, port_id);
477 /* Update the event with the dest port */
478 ipsec_event_pre_forward(pkt, port_id);
479 return PKT_FORWARDED;
482 RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
483 rte_pktmbuf_free(pkt);
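/*
 * Route the classified traffic of an event vector: plain IPv4/IPv6
 * packets go through the LPM route tables, ESP-bound packets get their
 * inline-protocol session metadata attached and use the SA's port. The
 * vector is compacted in place and the number of packets kept is
 * returned.
 */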
489 ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
490 struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
492 struct rte_ipsec_session *sess;
493 uint32_t sa_idx, i, j = 0;
494 uint16_t port_id = 0;
495 struct rte_mbuf *pkt;
498 /* Route IPv4 packets */
499 for (i = 0; i < t->ip4.num; i++) {
500 pkt = t->ip4.pkts[i];
501 port_id = route4_pkt(pkt, rt->rt4_ctx);
502 if (port_id != RTE_MAX_ETHPORTS) {
503 /* Update mac addresses */
504 update_mac_addrs(pkt, port_id);
505 /* Update the event with the dest port */
506 ipsec_event_pre_forward(pkt, port_id);
507 ev_vector_attr_update(vec, pkt);
508 vec->mbufs[j++] = pkt;
513 /* Route IPv6 packets */
514 for (i = 0; i < t->ip6.num; i++) {
515 pkt = t->ip6.pkts[i];
516 port_id = route6_pkt(pkt, rt->rt6_ctx);
517 if (port_id != RTE_MAX_ETHPORTS) {
518 /* Update mac addresses */
519 update_mac_addrs(pkt, port_id);
520 /* Update the event with the dest port */
521 ipsec_event_pre_forward(pkt, port_id);
522 ev_vector_attr_update(vec, pkt);
523 vec->mbufs[j++] = pkt;
528 /* Route ESP packets */
529 for (i = 0; i < t->ipsec.num; i++) {
530 /* Validate sa_idx */
531 sa_idx = t->ipsec.res[i];
532 pkt = t->ipsec.pkts[i];
533 if (unlikely(sa_idx >= sa_ctx->nb_sa))
536 /* Else the packet has to be protected */
537 sa = &(sa_ctx->sa[sa_idx]);
538 /* Get IPsec session */
539 sess = ipsec_get_primary_session(sa);
540 /* Allow only inline protocol for now */
541 if (unlikely(sess->type !=
542 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
543 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
547 rte_security_set_pkt_metadata(sess->security.ctx,
548 sess->security.ses, pkt, NULL);
550 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
551 port_id = sa->portid;
552 update_mac_addrs(pkt, port_id);
553 ipsec_event_pre_forward(pkt, port_id);
554 ev_vector_attr_update(vec, pkt);
555 vec->mbufs[j++] = pkt;
563 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
568 /* Check the packet type */
569 type = process_ipsec_get_pkt_type(pkt, &nlp);
572 case PKT_TYPE_PLAIN_IPV4:
573 t->ip4.data[t->ip4.num] = nlp;
574 t->ip4.pkts[(t->ip4.num)++] = pkt;
576 case PKT_TYPE_PLAIN_IPV6:
577 t->ip6.data[t->ip6.num] = nlp;
578 t->ip6.pkts[(t->ip6.num)++] = pkt;
581 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
589 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
590 struct rte_event_vector *vec)
592 struct ipsec_traffic t;
593 struct rte_mbuf *pkt;
600 for (i = 0; i < vec->nb_elem; i++) {
601 /* Get pkt from event */
604 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
605 if (unlikely(pkt->ol_flags &
606 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
608 "Inbound security offload failed\n");
614 classify_pkt(pkt, &t);
617 check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
618 check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
620 return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
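/*
 * App-mode outbound processing of an event vector: classify the plain
 * packets, run the outbound SP lookup in bulk and let
 * ipsec_ev_route_pkts() route/prepare the result.
 */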
624 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
625 struct rte_event_vector *vec)
627 struct ipsec_traffic t;
628 struct rte_mbuf *pkt;
635 for (i = 0; i < vec->nb_elem; i++) {
636 /* Get pkt from event */
639 classify_pkt(pkt, &t);
641 /* Provide L2 len for Outbound processing */
642 pkt->l2_len = RTE_ETHER_HDR_LEN;
645 check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
646 check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
648 return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
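/*
 * Driver-mode outbound processing of an event vector: attach the
 * pre-resolved per-port inline session to each packet and mark it for
 * Tx security offload; packets on ports without a session are dropped.
 * Returns the compacted vector length.
 */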
652 process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
653 struct port_drv_mode_data *data)
655 struct rte_mbuf *pkt;
660 for (i = 0; i < vec->nb_elem; i++) {
664 if (unlikely(!data[port_id].sess)) {
668 ipsec_event_pre_forward(pkt, port_id);
669 /* Save security session */
670 rte_security_set_pkt_metadata(data[port_id].ctx,
671 data[port_id].sess, pkt,
674 /* Mark the packet for Tx security offload */
675 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
677 /* Provide L2 len for Outbound processing */
678 pkt->l2_len = RTE_ETHER_HDR_LEN;
680 vec->mbufs[j++] = pkt;
687 ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
688 struct eh_event_link_info *links,
689 struct rte_event *ev)
691 struct rte_event_vector *vec = ev->vec;
692 struct rte_mbuf *pkt;
697 ev_vector_attr_init(vec);
698 if (is_unprotected_port(pkt->port))
699 ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
702 ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
705 if (likely(ret > 0)) {
707 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
708 links[0].event_port_id,
711 rte_mempool_put(rte_mempool_from_obj(vec), vec);
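/*
 * Driver-mode handling of a vector event: vectors from protected ports
 * get session metadata attached first; non-empty vectors are enqueued
 * to the Tx adapter, empty ones are returned to their mempool.
 */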
716 ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
717 struct rte_event *ev,
718 struct port_drv_mode_data *data)
720 struct rte_event_vector *vec = ev->vec;
721 struct rte_mbuf *pkt;
725 if (!is_unprotected_port(pkt->port))
726 vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
728 if (vec->nb_elem > 0)
729 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
730 links[0].event_port_id,
733 rte_mempool_put(rte_mempool_from_obj(vec), vec);
737 * Event mode exposes various operating modes depending on the
738 * capabilities of the event device and the operating mode
743 ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
744 void *args __rte_unused)
746 rte_pktmbuf_free(ev.mbuf);
749 /* Workers registered */
750 #define IPSEC_EVENTMODE_WORKERS 2
754 * Operating parameters : non-burst - Tx internal port - driver mode
757 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
760 struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
761 unsigned int nb_rx = 0, nb_tx;
762 struct rte_mbuf *pkt;
768 /* Check if we have links registered for this lcore */
770 /* No links registered - exit */
774 memset(data, 0, sizeof(data));
777 lcore_id = rte_lcore_id();
780 socket_id = rte_lcore_to_socket_id(lcore_id);
783 * Prepare the security sessions table. In outbound driver mode
784 * we always use the first session configured for a given port
786 prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
790 "Launching event mode worker (non-burst - Tx internal port - "
791 "driver mode) on lcore %d\n", lcore_id);
793 /* We have valid links */
795 /* Check if it's single link */
798 "Multiple links not supported. Using first link\n");
801 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
802 links[0].event_port_id);
803 while (!force_quit) {
804 /* Read packet from event queues */
805 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
806 links[0].event_port_id,
809 0 /* timeout_ticks */);
814 switch (ev.event_type) {
815 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
816 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
817 ipsec_ev_vector_drv_mode_process(links, &ev, data);
819 case RTE_EVENT_TYPE_ETHDEV:
822 RTE_LOG(ERR, IPSEC, "Invalid event type %u",
830 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
833 ipsec_event_pre_forward(pkt, port_id);
835 if (!is_unprotected_port(port_id)) {
837 if (unlikely(!data[port_id].sess)) {
838 rte_pktmbuf_free(pkt);
842 /* Save security session */
843 rte_security_set_pkt_metadata(data[port_id].ctx,
844 data[port_id].sess, pkt,
847 /* Mark the packet for Tx security offload */
848 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
850 /* Provide L2 len for Outbound processing */
851 pkt->l2_len = RTE_ETHER_HDR_LEN;
855 * Since the Tx internal port is available, events can be
856 * enqueued directly to the adapter and will be submitted
857 * internally to the eth device.
859 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
860 links[0].event_port_id,
865 rte_pktmbuf_free(ev.mbuf);
869 ev.op = RTE_EVENT_OP_RELEASE;
870 rte_event_enqueue_burst(links[0].eventdev_id,
871 links[0].event_port_id, &ev, 1);
874 rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
875 ipsec_event_port_flush, NULL);
880 * Operating parameters : non-burst - Tx internal port - app mode
883 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
886 struct lcore_conf_ev_tx_int_port_wrkr lconf;
887 unsigned int nb_rx = 0, nb_tx;
893 /* Check if we have links registered for this lcore */
895 /* No links registered - exit */
899 /* We have valid links */
902 lcore_id = rte_lcore_id();
905 socket_id = rte_lcore_to_socket_id(lcore_id);
907 /* Save routing table */
908 lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
909 lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
910 lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
911 lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
912 lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
913 lconf.inbound.lcore_id = lcore_id;
914 lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
915 lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
916 lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
917 lconf.outbound.lcore_id = lcore_id;
920 "Launching event mode worker (non-burst - Tx internal port - "
921 "app mode) on lcore %d\n", lcore_id);
923 /* Check if it's single link */
926 "Multiple links not supported. Using first link\n");
929 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
930 links[0].event_port_id);
932 while (!force_quit) {
933 /* Read packet from event queues */
934 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
935 links[0].event_port_id,
938 0 /* timeout_ticks */);
943 switch (ev.event_type) {
944 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
945 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
946 ipsec_ev_vector_process(&lconf, links, &ev);
948 case RTE_EVENT_TYPE_ETHDEV:
951 RTE_LOG(ERR, IPSEC, "Invalid event type %u",
956 if (is_unprotected_port(ev.mbuf->port))
957 ret = process_ipsec_ev_inbound(&lconf.inbound,
960 ret = process_ipsec_ev_outbound(&lconf.outbound,
963 /* The pkt has been dropped */
967 * Since the Tx internal port is available, events can be
968 * enqueued directly to the adapter and will be submitted
969 * internally to the eth device.
971 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
972 links[0].event_port_id,
977 rte_pktmbuf_free(ev.mbuf);
981 ev.op = RTE_EVENT_OP_RELEASE;
982 rte_event_enqueue_burst(links[0].eventdev_id,
983 links[0].event_port_id, &ev, 1);
986 rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
987 ipsec_event_port_flush, NULL);
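/*
 * Populate the table of supported event-mode workers: both are
 * non-burst with Tx internal port, one for driver mode and one for app
 * mode. Returns the number of entries filled.
 */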
991 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
993 struct eh_app_worker_params *wrkr;
994 uint8_t nb_wrkr_param = 0;
999 /* Non-burst - Tx internal port - driver mode */
1000 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1001 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1002 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1003 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
1007 /* Non-burst - Tx internal port - app mode */
1008 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1009 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1010 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1011 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
1014 return nb_wrkr_param;
1018 ipsec_eventmode_worker(struct eh_conf *conf)
1020 struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
1022 uint8_t nb_wrkr_param;
1024 /* Populate ipsec_wrkr params */
1025 nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
1028 * Launch correct worker after checking
1029 * the event device's capabilities.
1031 eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
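/*
 * Outbound SPD processing used by the inline-protocol poll-mode
 * workers: classify the plain traffic through the ACL, batch
 * consecutive packets hitting the same SA and prepare each batch with
 * rte_ipsec_pkt_process(), then append the prepared packets to the
 * "match" or "mismatch" traffic type depending on whether the SA's
 * outer IP version matches match_flag.
 */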
1034 static __rte_always_inline void
1035 outb_inl_pro_spd_process(struct sp_ctx *sp,
1036 struct sa_ctx *sa_ctx,
1037 struct traffic_type *ip,
1038 struct traffic_type *match,
1039 struct traffic_type *mismatch,
1041 struct ipsec_spd_stats *stats)
1043 uint32_t prev_sa_idx = UINT32_MAX;
1044 struct rte_mbuf *ipsec[MAX_PKT_BURST];
1045 struct rte_ipsec_session *ips;
1046 uint32_t i, j, j_mis, sa_idx;
1047 struct ipsec_sa *sa = NULL;
1048 uint32_t ipsec_num = 0;
1052 if (ip->num == 0 || sp == NULL)
1055 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
1056 ip->num, DEFAULT_MAX_CATEGORIES);
1059 j_mis = mismatch->num;
1061 for (i = 0; i < ip->num; i++) {
1063 sa_idx = ip->res[i] - 1;
1065 if (unlikely(ip->res[i] == DISCARD)) {
1069 } else if (unlikely(ip->res[i] == BYPASS)) {
1070 match->pkts[j++] = m;
1074 if (prev_sa_idx == UINT32_MAX) {
1075 prev_sa_idx = sa_idx;
1076 sa = &sa_ctx->sa[sa_idx];
1077 ips = ipsec_get_primary_session(sa);
1078 satp = rte_ipsec_sa_type(ips->sa);
1081 if (sa_idx != prev_sa_idx) {
1082 prep_process_group(sa, ipsec, ipsec_num);
1084 /* Prepare packets for outbound */
1085 rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1087 /* Copy to current tr or a different tr */
1088 if (SATP_OUT_IPV4(satp) == match_flag) {
1089 memcpy(&match->pkts[j], ipsec,
1090 ipsec_num * sizeof(void *));
1093 memcpy(&mismatch->pkts[j_mis], ipsec,
1094 ipsec_num * sizeof(void *));
1098 /* Update to new SA */
1099 sa = &sa_ctx->sa[sa_idx];
1100 ips = ipsec_get_primary_session(sa);
1101 satp = rte_ipsec_sa_type(ips->sa);
1105 ipsec[ipsec_num++] = m;
1111 prep_process_group(sa, ipsec, ipsec_num);
1113 /* Prepare packets for outbound */
1114 rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1116 /* Copy to current tr or a different tr */
1117 if (SATP_OUT_IPV4(satp) == match_flag) {
1118 memcpy(&match->pkts[j], ipsec,
1119 ipsec_num * sizeof(void *));
1122 memcpy(&mismatch->pkts[j_mis], ipsec,
1123 ipsec_num * sizeof(void *));
1128 mismatch->num = j_mis;
1131 /* Poll mode worker when all SAs are of type inline protocol */
1133 ipsec_poll_mode_wrkr_inl_pr(void)
1135 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1136 / US_PER_S * BURST_TX_DRAIN_US;
1137 struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
1138 struct rte_mbuf *pkts[MAX_PKT_BURST];
1139 uint64_t prev_tsc, diff_tsc, cur_tsc;
1140 struct ipsec_core_statistics *stats;
1141 struct rt_ctx *rt4_ctx, *rt6_ctx;
1142 struct sa_ctx *sa_in, *sa_out;
1143 struct traffic_type ip4, ip6;
1144 struct lcore_rx_queue *rxql;
1145 struct rte_mbuf **v4, **v6;
1146 struct ipsec_traffic trf;
1147 struct lcore_conf *qconf;
1148 uint16_t v4_num, v6_num;
1156 lcore_id = rte_lcore_id();
1157 qconf = &lcore_conf[lcore_id];
1158 rxql = qconf->rx_queue_list;
1159 socket_id = rte_lcore_to_socket_id(lcore_id);
1160 stats = &core_statistics[lcore_id];
1162 rt4_ctx = socket_ctx[socket_id].rt_ip4;
1163 rt6_ctx = socket_ctx[socket_id].rt_ip6;
1165 sp4_in = socket_ctx[socket_id].sp_ip4_in;
1166 sp6_in = socket_ctx[socket_id].sp_ip6_in;
1167 sa_in = socket_ctx[socket_id].sa_in;
1169 sp4_out = socket_ctx[socket_id].sp_ip4_out;
1170 sp6_out = socket_ctx[socket_id].sp_ip6_out;
1171 sa_out = socket_ctx[socket_id].sa_out;
1173 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1175 if (qconf->nb_rx_queue == 0) {
1176 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1181 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1183 for (i = 0; i < qconf->nb_rx_queue; i++) {
1184 portid = rxql[i].port_id;
1185 queueid = rxql[i].queue_id;
1186 RTE_LOG(INFO, IPSEC,
1187 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1188 lcore_id, portid, queueid);
1191 while (!force_quit) {
1192 cur_tsc = rte_rdtsc();
1194 /* TX queue buffer drain */
1195 diff_tsc = cur_tsc - prev_tsc;
1197 if (unlikely(diff_tsc > drain_tsc)) {
1198 drain_tx_buffers(qconf);
1202 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1203 /* Read packets from RX queues */
1204 portid = rxql[i].port_id;
1205 queueid = rxql[i].queue_id;
1206 nb_rx = rte_eth_rx_burst(portid, queueid,
1207 pkts, MAX_PKT_BURST);
1212 core_stats_update_rx(nb_rx);
1214 prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
1216 /* Drop any IPsec traffic */
1217 free_pkts(trf.ipsec.pkts, trf.ipsec.num);
1219 if (is_unprotected_port(portid)) {
1220 inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
1222 &stats->inbound.spd4);
1224 inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
1226 &stats->inbound.spd6);
1229 v4_num = trf.ip4.num;
1231 v6_num = trf.ip6.num;
1236 outb_inl_pro_spd_process(sp4_out, sa_out,
1237 &trf.ip4, &ip4, &ip6,
1239 &stats->outbound.spd4);
1241 outb_inl_pro_spd_process(sp6_out, sa_out,
1242 &trf.ip6, &ip6, &ip4,
1244 &stats->outbound.spd6);
1251 route4_pkts(rt4_ctx, v4, v4_num, 0, false);
1252 route6_pkts(rt6_ctx, v6, v6_num);
1257 /* Poll mode worker when all SAs are of type inline protocol
1258 * and single SA mode is enabled.
1261 ipsec_poll_mode_wrkr_inl_pr_ss(void)
1263 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1264 / US_PER_S * BURST_TX_DRAIN_US;
1265 uint16_t sa_out_portid = 0, sa_out_proto = 0;
1266 struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
1267 uint64_t prev_tsc, diff_tsc, cur_tsc;
1268 struct rte_ipsec_session *ips = NULL;
1269 struct lcore_rx_queue *rxql;
1270 struct ipsec_sa *sa = NULL;
1271 struct lcore_conf *qconf;
1272 struct sa_ctx *sa_out;
1273 uint32_t i, nb_rx, j;
1280 lcore_id = rte_lcore_id();
1281 qconf = &lcore_conf[lcore_id];
1282 rxql = qconf->rx_queue_list;
1283 socket_id = rte_lcore_to_socket_id(lcore_id);
1286 sa_out = socket_ctx[socket_id].sa_out;
1287 if (sa_out && single_sa_idx < sa_out->nb_sa) {
1288 sa = &sa_out->sa[single_sa_idx];
1289 ips = ipsec_get_primary_session(sa);
1290 sa_out_portid = sa->portid;
1291 if (sa->flags & IP6_TUNNEL)
1292 sa_out_proto = IPPROTO_IPV6;
1294 sa_out_proto = IPPROTO_IP;
1297 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1299 if (qconf->nb_rx_queue == 0) {
1300 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1305 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1307 for (i = 0; i < qconf->nb_rx_queue; i++) {
1308 portid = rxql[i].port_id;
1309 queueid = rxql[i].queue_id;
1310 RTE_LOG(INFO, IPSEC,
1311 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1312 lcore_id, portid, queueid);
1315 while (!force_quit) {
1316 cur_tsc = rte_rdtsc();
1318 /* TX queue buffer drain */
1319 diff_tsc = cur_tsc - prev_tsc;
1321 if (unlikely(diff_tsc > drain_tsc)) {
1322 drain_tx_buffers(qconf);
1326 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1327 /* Read packets from RX queues */
1328 portid = rxql[i].port_id;
1329 queueid = rxql[i].queue_id;
1330 nb_rx = rte_eth_rx_burst(portid, queueid,
1331 pkts, MAX_PKT_BURST);
1336 core_stats_update_rx(nb_rx);
1338 if (is_unprotected_port(portid)) {
1339 /* Nothing much to do for inbound inline
1340 * decrypted traffic.
1342 for (j = 0; j < nb_rx; j++) {
1343 uint32_t ptype, proto;
1346 ptype = pkt->packet_type &
1348 if (ptype == RTE_PTYPE_L3_IPV4)
1351 proto = IPPROTO_IPV6;
1353 send_single_packet(pkt, portid, proto);
1359 /* Free packets if there are no outbound sessions */
1360 if (unlikely(!ips)) {
1361 rte_pktmbuf_free_bulk(pkts, nb_rx);
1365 rte_ipsec_pkt_process(ips, pkts, nb_rx);
1368 for (j = 0; j < nb_rx; j++) {
1371 pkt->l2_len = RTE_ETHER_HDR_LEN;
1372 send_single_packet(pkt, sa_out_portid,
1380 ipsec_poll_mode_wrkr_launch(void)
1382 static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
1383 [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
1384 [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
1386 ipsec_worker_fn_t fn;
1388 if (!app_sa_prm.enable) {
1389 fn = ipsec_poll_mode_worker;
1391 fn = poll_mode_wrkrs[wrkr_flags];
1393 /* Always default to all mode worker */
1395 fn = ipsec_poll_mode_worker;
1402 int ipsec_launch_one_lcore(void *args)
1404 struct eh_conf *conf;
1406 conf = (struct eh_conf *)args;
1408 if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
1409 /* Run in poll mode */
1410 ipsec_poll_mode_wrkr_launch();
1411 } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
1412 /* Run in event mode */
1413 ipsec_eventmode_worker(conf);