examples/ipsec-secgw: add app mode worker
author    Lukasz Bartosik <lbartosik@marvell.com>
          Thu, 27 Feb 2020 16:18:34 +0000 (17:18 +0100)
committer Akhil Goyal <akhil.goyal@oss.nxp.com>
          Sun, 5 Apr 2020 16:26:04 +0000 (18:26 +0200)
Add application inbound/outbound worker thread and
IPsec application processing code for event mode.

Example ipsec-secgw command in app mode:
ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128
-w 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
--log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 -f aes-gcm.cfg
--transfer-mode event --event-schedule-type parallel
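
For reference, assuming the standard ipsec-secgw options: -w whitelists
the devices to use, -P enables promiscuous mode, -p 0x3 is the port mask,
-u 0x1 marks port 0 as unprotected, -f aes-gcm.cfg supplies the SP/SA
configuration, --transfer-mode event selects the event-based data path,
and --event-schedule-type parallel sets the event queue schedule type.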

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
examples/ipsec-secgw/ipsec-secgw.c
examples/ipsec-secgw/ipsec-secgw.h
examples/ipsec-secgw/ipsec.h
examples/ipsec-secgw/ipsec_worker.c
examples/ipsec-secgw/ipsec_worker.h [new file with mode: 0644]

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1eb7667..2e55614 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
 
 #include "event_helper.h"
 #include "ipsec.h"
+#include "ipsec_worker.h"
 #include "parser.h"
 #include "sad.h"
 
 volatile bool force_quit;
 
-#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
-
 #define MAX_JUMBO_PKT_LEN  9600
 
 #define MEMPOOL_CACHE_SIZE 256
@@ -86,29 +85,6 @@ volatile bool force_quit;
 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 
-#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
-       (((uint64_t)((a) & 0xff) << 56) | \
-       ((uint64_t)((b) & 0xff) << 48) | \
-       ((uint64_t)((c) & 0xff) << 40) | \
-       ((uint64_t)((d) & 0xff) << 32) | \
-       ((uint64_t)((e) & 0xff) << 24) | \
-       ((uint64_t)((f) & 0xff) << 16) | \
-       ((uint64_t)((g) & 0xff) << 8)  | \
-       ((uint64_t)(h) & 0xff))
-#else
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
-       (((uint64_t)((h) & 0xff) << 56) | \
-       ((uint64_t)((g) & 0xff) << 48) | \
-       ((uint64_t)((f) & 0xff) << 40) | \
-       ((uint64_t)((e) & 0xff) << 32) | \
-       ((uint64_t)((d) & 0xff) << 24) | \
-       ((uint64_t)((c) & 0xff) << 16) | \
-       ((uint64_t)((b) & 0xff) << 8) | \
-       ((uint64_t)(a) & 0xff))
-#endif
-#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
-
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
                (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
                (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
@@ -120,11 +96,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 
 #define MTU_TO_FRAMELEN(x)     ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
-/* port/source ethernet addr and destination ethernet addr */
-struct ethaddr_info {
-       uint64_t src, dst;
-};
-
 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
        { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
        { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index a07a920..4b53cb5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -8,6 +8,69 @@
 
 #define NB_SOCKETS 4
 
+#define MAX_PKT_BURST 32
+
+#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
+
+#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
+       (((uint64_t)((a) & 0xff) << 56) | \
+       ((uint64_t)((b) & 0xff) << 48) | \
+       ((uint64_t)((c) & 0xff) << 40) | \
+       ((uint64_t)((d) & 0xff) << 32) | \
+       ((uint64_t)((e) & 0xff) << 24) | \
+       ((uint64_t)((f) & 0xff) << 16) | \
+       ((uint64_t)((g) & 0xff) << 8)  | \
+       ((uint64_t)(h) & 0xff))
+#else
+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
+       (((uint64_t)((h) & 0xff) << 56) | \
+       ((uint64_t)((g) & 0xff) << 48) | \
+       ((uint64_t)((f) & 0xff) << 40) | \
+       ((uint64_t)((e) & 0xff) << 32) | \
+       ((uint64_t)((d) & 0xff) << 24) | \
+       ((uint64_t)((c) & 0xff) << 16) | \
+       ((uint64_t)((b) & 0xff) << 8) | \
+       ((uint64_t)(a) & 0xff))
+#endif
+
+#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
+
+struct traffic_type {
+       const uint8_t *data[MAX_PKT_BURST * 2];
+       struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
+       void *saptr[MAX_PKT_BURST * 2];
+       uint32_t res[MAX_PKT_BURST * 2];
+       uint32_t num;
+};
+
+struct ipsec_traffic {
+       struct traffic_type ipsec;
+       struct traffic_type ip4;
+       struct traffic_type ip6;
+};
+
+/* Fields optimized for devices without burst */
+struct traffic_type_nb {
+       const uint8_t *data;
+       struct rte_mbuf *pkt;
+       uint32_t res;
+       uint32_t num;
+};
+
+struct ipsec_traffic_nb {
+       struct traffic_type_nb ipsec;
+       struct traffic_type_nb ip4;
+       struct traffic_type_nb ip6;
+};
+
+/* port/source ethernet addr and destination ethernet addr */
+struct ethaddr_info {
+       uint64_t src, dst;
+};
+
+extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
+
 /* Port mask to identify the unprotected ports */
 extern uint32_t unprotected_port_mask;
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index ad913bf..f8f29f9 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
 
 #include "ipsec-secgw.h"
 
-#define RTE_LOGTYPE_IPSEC       RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
 #define RTE_LOGTYPE_IPSEC_IPIP  RTE_LOGTYPE_USER3
 
-#define MAX_PKT_BURST 32
 #define MAX_INFLIGHT 128
 #define MAX_QP_PER_LCORE 256
 
@@ -259,20 +257,6 @@ struct cnt_blk {
        uint32_t cnt;
 } __attribute__((packed));
 
-struct traffic_type {
-       const uint8_t *data[MAX_PKT_BURST * 2];
-       struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
-       void *saptr[MAX_PKT_BURST * 2];
-       uint32_t res[MAX_PKT_BURST * 2];
-       uint32_t num;
-};
-
-struct ipsec_traffic {
-       struct traffic_type ipsec;
-       struct traffic_type ip4;
-       struct traffic_type ip6;
-};
-
 /* Socket ctx */
 extern struct socket_ctx socket_ctx[NB_SOCKETS];
 
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index b7a1ef9..5fde667 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -2,11 +2,51 @@
  * Copyright(c) 2010-2016 Intel Corporation
  * Copyright (C) 2020 Marvell International Ltd.
  */
+#include <rte_acl.h>
 #include <rte_event_eth_tx_adapter.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
 
 #include "event_helper.h"
 #include "ipsec.h"
 #include "ipsec-secgw.h"
+#include "ipsec_worker.h"
+
+static inline enum pkt_type
+process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
+{
+       struct rte_ether_hdr *eth;
+
+       eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+       if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+               *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
+                               offsetof(struct ip, ip_p));
+               if (**nlp == IPPROTO_ESP)
+                       return PKT_TYPE_IPSEC_IPV4;
+               else
+                       return PKT_TYPE_PLAIN_IPV4;
+       } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+               *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
+                               offsetof(struct ip6_hdr, ip6_nxt));
+               if (**nlp == IPPROTO_ESP)
+                       return PKT_TYPE_IPSEC_IPV6;
+               else
+                       return PKT_TYPE_PLAIN_IPV6;
+       }
+
+       /* Unknown/Unsupported type */
+       return PKT_TYPE_INVALID;
+}
+
+static inline void
+update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
+{
+       struct rte_ether_hdr *ethhdr;
+
+       ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+       memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
+       memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
+}
 
 static inline void
 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
@@ -61,6 +101,290 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
        }
 }
 
+static inline int
+check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
+{
+       uint32_t res;
+
+       if (unlikely(sp == NULL))
+               return 0;
+
+       rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
+                       DEFAULT_MAX_CATEGORIES);
+
+       if (unlikely(res == 0)) {
+               /* No match */
+               return 0;
+       }
+
+       if (res == DISCARD)
+               return 0;
+       else if (res == BYPASS) {
+               *sa_idx = -1;
+               return 1;
+       }
+
+       *sa_idx = res - 1;
+       return 1;
+}
+
+static inline uint16_t
+route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
+{
+       uint32_t dst_ip;
+       uint16_t offset;
+       uint32_t hop;
+       int ret;
+
+       offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
+       dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
+       dst_ip = rte_be_to_cpu_32(dst_ip);
+
+       ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
+
+       if (ret == 0) {
+               /* We have a hit */
+               return hop;
+       }
+
+       /* else */
+       return RTE_MAX_ETHPORTS;
+}
+
+/* TODO: To be tested */
+static inline uint16_t
+route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
+{
+       uint8_t dst_ip[16];
+       uint8_t *ip6_dst;
+       uint16_t offset;
+       uint32_t hop;
+       int ret;
+
+       offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
+       ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
+       memcpy(&dst_ip[0], ip6_dst, 16);
+
+       ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
+
+       if (ret == 0) {
+               /* We have a hit */
+               return hop;
+       }
+
+       /* else */
+       return RTE_MAX_ETHPORTS;
+}
+
+static inline uint16_t
+get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
+{
+       if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
+               return route4_pkt(pkt, rt->rt4_ctx);
+       else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
+               return route6_pkt(pkt, rt->rt6_ctx);
+
+       return RTE_MAX_ETHPORTS;
+}
+
+static inline int
+process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
+               struct rte_event *ev)
+{
+       struct ipsec_sa *sa = NULL;
+       struct rte_mbuf *pkt;
+       uint16_t port_id = 0;
+       enum pkt_type type;
+       uint32_t sa_idx;
+       uint8_t *nlp;
+
+       /* Get pkt from event */
+       pkt = ev->mbuf;
+
+       /* Check the packet type */
+       type = process_ipsec_get_pkt_type(pkt, &nlp);
+
+       switch (type) {
+       case PKT_TYPE_PLAIN_IPV4:
+               if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+                       if (unlikely(pkt->ol_flags &
+                                    PKT_RX_SEC_OFFLOAD_FAILED)) {
+                               RTE_LOG(ERR, IPSEC,
+                                       "Inbound security offload failed\n");
+                               goto drop_pkt_and_exit;
+                       }
+                       sa = pkt->userdata;
+               }
+
+               /* Check if we have a match */
+               if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
+                       /* No valid match */
+                       goto drop_pkt_and_exit;
+               }
+               break;
+
+       case PKT_TYPE_PLAIN_IPV6:
+               if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+                       if (unlikely(pkt->ol_flags &
+                                    PKT_RX_SEC_OFFLOAD_FAILED)) {
+                               RTE_LOG(ERR, IPSEC,
+                                       "Inbound security offload failed\n");
+                               goto drop_pkt_and_exit;
+                       }
+                       sa = pkt->userdata;
+               }
+
+               /* Check if we have a match */
+               if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
+                       /* No valid match */
+                       goto drop_pkt_and_exit;
+               }
+               break;
+
+       default:
+               RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
+               goto drop_pkt_and_exit;
+       }
+
+       /* Check if the packet has to be bypassed */
+       if (sa_idx == BYPASS)
+               goto route_and_send_pkt;
+
+       /* Validate sa_idx */
+       if (sa_idx >= ctx->sa_ctx->nb_sa)
+               goto drop_pkt_and_exit;
+
+       /* Else the packet has to be protected with SA */
+
+       /* If the packet was IPsec processed, then SA pointer should be set */
+       if (sa == NULL)
+               goto drop_pkt_and_exit;
+
+       /* SPI on the packet should match with the one in SA */
+       if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
+               goto drop_pkt_and_exit;
+
+route_and_send_pkt:
+       port_id = get_route(pkt, rt, type);
+       if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+               /* no match */
+               goto drop_pkt_and_exit;
+       }
+       /* else, we have a matching route */
+
+       /* Update mac addresses */
+       update_mac_addrs(pkt, port_id);
+
+       /* Update the event with the dest port */
+       ipsec_event_pre_forward(pkt, port_id);
+       return PKT_FORWARDED;
+
+drop_pkt_and_exit:
+       RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
+       rte_pktmbuf_free(pkt);
+       ev->mbuf = NULL;
+       return PKT_DROPPED;
+}
+
+static inline int
+process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
+               struct rte_event *ev)
+{
+       struct rte_ipsec_session *sess;
+       struct sa_ctx *sa_ctx;
+       struct rte_mbuf *pkt;
+       uint16_t port_id = 0;
+       struct ipsec_sa *sa;
+       enum pkt_type type;
+       uint32_t sa_idx;
+       uint8_t *nlp;
+
+       /* Get pkt from event */
+       pkt = ev->mbuf;
+
+       /* Check the packet type */
+       type = process_ipsec_get_pkt_type(pkt, &nlp);
+
+       switch (type) {
+       case PKT_TYPE_PLAIN_IPV4:
+               /* Check if we have a match */
+               if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
+                       /* No valid match */
+                       goto drop_pkt_and_exit;
+               }
+               break;
+       case PKT_TYPE_PLAIN_IPV6:
+               /* Check if we have a match */
+               if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
+                       /* No valid match */
+                       goto drop_pkt_and_exit;
+               }
+               break;
+       default:
+               /*
+                * Only plain IPv4 & IPv6 packets are allowed
+                * on protected port. Drop the rest.
+                */
+               RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
+               goto drop_pkt_and_exit;
+       }
+
+       /* Check if the packet has to be bypassed */
+       if (sa_idx == BYPASS) {
+               port_id = get_route(pkt, rt, type);
+               if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+                       /* no match */
+                       goto drop_pkt_and_exit;
+               }
+               /* else, we have a matching route */
+               goto send_pkt;
+       }
+
+       /* Validate sa_idx */
+       if (sa_idx >= ctx->sa_ctx->nb_sa)
+               goto drop_pkt_and_exit;
+
+       /* Else the packet has to be protected */
+
+       /* Get SA ctx*/
+       sa_ctx = ctx->sa_ctx;
+
+       /* Get SA */
+       sa = &(sa_ctx->sa[sa_idx]);
+
+       /* Get IPsec session */
+       sess = ipsec_get_primary_session(sa);
+
+       /* Allow only inline protocol for now */
+       if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+               RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+               goto drop_pkt_and_exit;
+       }
+
+       if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
+               pkt->userdata = sess->security.ses;
+
+       /* Mark the packet for Tx security offload */
+       pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+
+       /* Get the port to which this pkt need to be submitted */
+       port_id = sa->portid;
+
+send_pkt:
+       /* Update mac addresses */
+       update_mac_addrs(pkt, port_id);
+
+       /* Update the event with the dest port */
+       ipsec_event_pre_forward(pkt, port_id);
+       return PKT_FORWARDED;
+
+drop_pkt_and_exit:
+       RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
+       rte_pktmbuf_free(pkt);
+       ev->mbuf = NULL;
+       return PKT_DROPPED;
+}
+
 /*
  * Event mode exposes various operating modes depending on the
  * capabilities of the event device and the operating mode
@@ -68,7 +392,7 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
  */
 
 /* Workers registered */
-#define IPSEC_EVENTMODE_WORKERS                1
+#define IPSEC_EVENTMODE_WORKERS                2
 
 /*
  * Event mode worker
@@ -146,7 +470,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
                        }
 
                        /* Save security session */
-                       pkt->udata64 = (uint64_t) sess_tbl[port_id];
+                       pkt->userdata = sess_tbl[port_id];
 
                        /* Mark the packet for Tx security offload */
                        pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
@@ -165,6 +489,105 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
        }
 }
 
+/*
+ * Event mode worker
+ * Operating parameters : non-burst - Tx internal port - app mode
+ */
+static void
+ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
+               uint8_t nb_links)
+{
+       struct lcore_conf_ev_tx_int_port_wrkr lconf;
+       unsigned int nb_rx = 0;
+       struct rte_event ev;
+       uint32_t lcore_id;
+       int32_t socket_id;
+       int ret;
+
+       /* Check if we have links registered for this lcore */
+       if (nb_links == 0) {
+               /* No links registered - exit */
+               return;
+       }
+
+       /* We have valid links */
+
+       /* Get core ID */
+       lcore_id = rte_lcore_id();
+
+       /* Get socket ID */
+       socket_id = rte_lcore_to_socket_id(lcore_id);
+
+       /* Save routing table */
+       lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
+       lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
+       lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
+       lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
+       lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
+       lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
+       lconf.inbound.session_priv_pool =
+                       socket_ctx[socket_id].session_priv_pool;
+       lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
+       lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
+       lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
+       lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
+       lconf.outbound.session_priv_pool =
+                       socket_ctx[socket_id].session_priv_pool;
+
+       RTE_LOG(INFO, IPSEC,
+               "Launching event mode worker (non-burst - Tx internal port - "
+               "app mode) on lcore %d\n", lcore_id);
+
+       /* Check if it's single link */
+       if (nb_links != 1) {
+               RTE_LOG(INFO, IPSEC,
+                       "Multiple links not supported. Using first link\n");
+       }
+
+       RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+               links[0].event_port_id);
+
+       while (!force_quit) {
+               /* Read packet from event queues */
+               nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+                               links[0].event_port_id,
+                               &ev,     /* events */
+                               1,       /* nb_events */
+                               0        /* timeout_ticks */);
+
+               if (nb_rx == 0)
+                       continue;
+
+               if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
+                       RTE_LOG(ERR, IPSEC, "Invalid event type %u",
+                               ev.event_type);
+
+                       continue;
+               }
+
+               if (is_unprotected_port(ev.mbuf->port))
+                       ret = process_ipsec_ev_inbound(&lconf.inbound,
+                                                       &lconf.rt, &ev);
+               else
+                       ret = process_ipsec_ev_outbound(&lconf.outbound,
+                                                       &lconf.rt, &ev);
+               if (ret != 1)
+                       /* The pkt has been dropped */
+                       continue;
+
+               /*
+                * Since tx internal port is available, events can be
+                * directly enqueued to the adapter and it would be
+                * internally submitted to the eth device.
+                */
+               rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+                               links[0].event_port_id,
+                               &ev,    /* events */
+                               1,      /* nb_events */
+                               0       /* flags */);
+       }
+}
+
 static uint8_t
 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
 {
@@ -180,6 +603,14 @@ ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
        wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
        wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
        wrkr++;
+       nb_wrkr_param++;
+
+       /* Non-burst - Tx internal port - app mode */
+       wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
+       wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
+       wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
+       wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
+       nb_wrkr_param++;
 
        return nb_wrkr_param;
 }
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
new file mode 100644
index 0000000..5d85cf1
--- /dev/null
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _IPSEC_WORKER_H_
+#define _IPSEC_WORKER_H_
+
+#include "ipsec.h"
+
+enum pkt_type {
+       PKT_TYPE_PLAIN_IPV4 = 1,
+       PKT_TYPE_IPSEC_IPV4,
+       PKT_TYPE_PLAIN_IPV6,
+       PKT_TYPE_IPSEC_IPV6,
+       PKT_TYPE_INVALID
+};
+
+enum {
+       PKT_DROPPED = 0,
+       PKT_FORWARDED,
+       PKT_POSTED      /* for lookaside case */
+};
+
+struct route_table {
+       struct rt_ctx *rt4_ctx;
+       struct rt_ctx *rt6_ctx;
+};
+
+/*
+ * Conf required by event mode worker with tx internal port
+ */
+struct lcore_conf_ev_tx_int_port_wrkr {
+       struct ipsec_ctx inbound;
+       struct ipsec_ctx outbound;
+       struct route_table rt;
+} __rte_cache_aligned;
+
+void ipsec_poll_mode_worker(void);
+
+int ipsec_launch_one_lcore(void *args);
+
+#endif /* _IPSEC_WORKER_H_ */