/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

struct port_drv_mode_data {
	struct rte_security_session *sess;
	struct rte_security_ctx *ctx;
};
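
/*
 * Inspect the mbuf packet type to classify the packet as plain or
 * IPsec (ESP tunnelled) IPv4/IPv6, and point *nlp at the next-protocol
 * field of the L3 header for the SP (ACL) lookup.
 */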
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}
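
/* Rewrite the Ethernet header with the configured per-port src/dst MACs. */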
static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}
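
/* Stamp the destination port and Tx queue on the mbuf before enqueue. */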
static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}
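
/* Mark vector attributes valid; the port is filled in by the first packet. */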
static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	/* 0xFFFF means the port attribute is not set yet */
	vec->port = 0xFFFF;
	vec->queue = 0;
}

static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	/* Vector attr update for the first packet */
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}

	/* A packet from a different port invalidates the common attribute */
	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}
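
/*
 * Build the per-port table of inline sessions used by the driver-mode
 * outbound path: for every outbound SA, record the first inline-protocol
 * session found for its port.
 */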
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
		struct port_drv_mode_data *data, uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {
		sa = &sa_out->sa[i];
		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d exceeds table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}
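
/*
 * Run an SP (ACL) lookup for a single packet. Returns 0 to drop the
 * packet, non-zero on match with *sa_idx set to the SA index (or BYPASS).
 */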
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = BYPASS;
		return 1;
	}

	/* ACL result encodes SA index + 1 */
	*sa_idx = res - 1;
	return 1;
}
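
/*
 * Bulk SP (ACL) lookup for the outbound path: DISCARD results are freed,
 * BYPASS packets stay compacted in the plain-traffic list, and PROTECT
 * packets move to the ipsec list together with their SA index.
 */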
static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, res;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}
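
/*
 * Bulk SP (ACL) lookup for the inbound path, with an extra check that the
 * SA which actually processed the packet matches the SA the SP points to.
 */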
static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	struct rte_mbuf *m;
	uint32_t i, j, res;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match with the one in SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}
			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}
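
/* LPM lookup on the IPv4 destination address; returns the egress port. */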
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* No route found */
	return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* No route found */
	return RTE_MAX_ETHPORTS;
}
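
/* Dispatch to the v4/v6 routing table based on packet type. */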
static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	/* no match */
	return RTE_MAX_ETHPORTS;
}
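
/*
 * Inbound single-packet handler (app mode): verify inline-offload status,
 * run the SP check, validate the SA that processed the packet, then route
 * the packet and prepare the event for Tx.
 */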
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}
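
/*
 * Outbound single-packet handler (app mode): run the SP check, fetch the
 * SA and its inline-protocol session, attach the session metadata and
 * mark the packet for Tx security offload.
 */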
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}
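
/*
 * Route the already-classified vector traffic: plain IPv4/IPv6 packets go
 * through LPM lookup, ESP-bound packets get their SA session attached.
 * Returns the number of packets compacted back into the vector.
 */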
static inline uint16_t
ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
{
	struct rte_ipsec_session *sess;
	uint32_t sa_idx, i, j = 0;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			update_mac_addrs(pkt, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			update_mac_addrs(pkt, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
			free_pkts(&pkt, 1);
			continue;
		}
		/* Else the packet has to be protected */
		sa = &(sa_ctx->sa[sa_idx]);
		/* Get IPsec session */
		sess = ipsec_get_primary_session(sa);
		/* Allow only inline protocol for now */
		if (unlikely(sess->type !=
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
			free_pkts(&pkt, 1);
			continue;
		}
		rte_security_set_pkt_metadata(sess->security.ctx,
				sess->security.ses, pkt, NULL);

		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		port_id = sa->portid;
		update_mac_addrs(pkt, port_id);
		ipsec_event_pre_forward(pkt, port_id);
		ev_vector_attr_update(vec, pkt);
		vec->mbufs[j++] = pkt;
	}

	return j;
}
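
/* Sort a packet into the plain IPv4/IPv6 lists by its packet type. */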
static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	enum pkt_type type;
	uint8_t *nlp;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[(t->ip4.num)++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[(t->ip6.num)++] = pkt;
		break;
	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		free_pkts(&pkt, 1);
		break;
	}
}
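
/*
 * Inbound vector handler: drop offload failures, classify the rest, run
 * bulk SP+SA checks and route the survivors.
 */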
static inline uint16_t
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event_vector *vec)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}

	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}
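
/*
 * Outbound vector handler: classify packets, set the L2 length needed for
 * outbound processing, run bulk SP checks and route.
 */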
static inline uint16_t
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event_vector *vec)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		classify_pkt(pkt, &t);

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
	}

	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}
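
/*
 * Driver-mode outbound vector handler: attach the pre-built per-port
 * session to every packet and mark it for Tx security offload.
 */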
static inline uint16_t
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
		struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		/* Drop the packet if no session exists for this port */
		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}
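
/* App-mode vector entry point: process and enqueue to the Tx adapter. */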
static void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	uint16_t ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec);

	if (ret > 0) {
		vec->nb_elem = ret;
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 ev, 1, 0);
	}
}
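
/* Driver-mode vector entry point. */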
static void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
				 struct rte_event *ev,
				 struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;

	pkt = vec->mbufs[0];

	/* Inbound (unprotected port) traffic is forwarded as is;
	 * outbound packets need the per-port session attached.
	 */
	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (vec->nb_elem > 0)
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 ev, 1, 0);
	else
		/* Nothing left to send; return the vector to its pool */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Zero the whole session table, not just its first entry */
	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1)
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {
			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 &ev,	/* events */
						 1,	/* nb_events */
						 0	/* flags */);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1)
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
							&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != PKT_FORWARDED) {
			/* The pkt has been dropped */
			continue;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 &ev,	/* events */
						 1,	/* nb_events */
						 0	/* flags */);
	}
}
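
/*
 * Register the supported worker threads against their event device
 * capability profile (Rx burst type / Tx port type / IPsec mode).
 */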
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}
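
/* Populate worker params and hand off to the event helper launcher. */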
static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}