examples/ipsec-secgw: add poll mode worker for inline proto
examples/ipsec-secgw/ipsec_worker.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright (C) 2020 Marvell International Ltd.
4  */
5 #include <rte_acl.h>
6 #include <rte_event_eth_tx_adapter.h>
7 #include <rte_lpm.h>
8 #include <rte_lpm6.h>
9
10 #include "event_helper.h"
11 #include "ipsec.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
14
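/*
 * Per-port data used by the driver mode workers: the inline protocol
 * security session (and its context) applied to all outbound traffic
 * leaving through that port.
 */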
15 struct port_drv_mode_data {
16         struct rte_security_session *sess;
17         struct rte_security_ctx *ctx;
18 };
19
20 typedef void (*ipsec_worker_fn_t)(void);
21
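/*
 * Classify a packet using its parsed packet type and return a pointer
 * (via nlp) to its L3 next-protocol field, which is what the SP (ACL)
 * lookups match against.
 */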
22 static inline enum pkt_type
23 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
24 {
25         struct rte_ether_hdr *eth;
26         uint32_t ptype = pkt->packet_type;
27
28         eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
29         rte_prefetch0(eth);
30
31         if (RTE_ETH_IS_IPV4_HDR(ptype)) {
32                 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
33                                 offsetof(struct ip, ip_p));
34                 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
35                         return PKT_TYPE_IPSEC_IPV4;
36                 else
37                         return PKT_TYPE_PLAIN_IPV4;
38         } else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
39                 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
40                                 offsetof(struct ip6_hdr, ip6_nxt));
41                 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
42                         return PKT_TYPE_IPSEC_IPV6;
43                 else
44                         return PKT_TYPE_PLAIN_IPV6;
45         }
46
47         /* Unknown/Unsupported type */
48         return PKT_TYPE_INVALID;
49 }
50
51 static inline void
52 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
53 {
54         struct rte_ether_hdr *ethhdr;
55
56         ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
57         memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
58         memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
59 }
60
61 static inline void
62 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
63 {
64         /* Save the destination port in the mbuf */
65         m->port = port_id;
66
67         /* Save eth queue for Tx */
68         rte_event_eth_tx_adapter_txq_set(m, 0);
69 }
70
71 static inline void
72 ev_vector_attr_init(struct rte_event_vector *vec)
73 {
74         vec->attr_valid = 1;
75         vec->port = 0xFFFF;
76         vec->queue = 0;
77 }
78
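/*
 * Vector attributes stay valid only while every mbuf in the vector targets
 * the same destination port; once ports differ, attr_valid is cleared so
 * that per-mbuf port/queue information is used instead.
 */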
79 static inline void
80 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
81 {
82         if (vec->port == 0xFFFF) {
83                 vec->port = pkt->port;
84                 return;
85         }
86         if (vec->attr_valid && (vec->port != pkt->port))
87                 vec->attr_valid = 0;
88 }
89
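/*
 * Build the per-port session table used in outbound driver mode from the
 * outbound SA database. Only inline protocol sessions are accepted, and
 * only the first one found for each port is recorded.
 */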
90 static inline void
91 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
92                          struct port_drv_mode_data *data,
93                          uint16_t size)
94 {
95         struct rte_ipsec_session *pri_sess;
96         struct ipsec_sa *sa;
97         uint32_t i;
98
99         if (!sa_out)
100                 return;
101
102         for (i = 0; i < sa_out->nb_sa; i++) {
103
104                 sa = &sa_out->sa[i];
105                 if (!sa)
106                         continue;
107
108                 pri_sess = ipsec_get_primary_session(sa);
109                 if (!pri_sess)
110                         continue;
111
112                 if (pri_sess->type !=
113                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
114
115                         RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
116                                 pri_sess->type);
117                         continue;
118                 }
119
120                 if (sa->portid >= size) {
121                         RTE_LOG(ERR, IPSEC,
122                                 "Port id >= table size: %d, %d\n",
123                                 sa->portid, size);
124                         continue;
125                 }
126
127                 /* Use only first inline session found for a given port */
128                 if (data[sa->portid].sess)
129                         continue;
130                 data[sa->portid].sess = pri_sess->security.ses;
131                 data[sa->portid].ctx = pri_sess->security.ctx;
132         }
133 }
134
135 static inline int
136 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
137 {
138         uint32_t res;
139
140         if (unlikely(sp == NULL))
141                 return 0;
142
143         rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
144                         DEFAULT_MAX_CATEGORIES);
145
146         if (unlikely(res == DISCARD))
147                 return 0;
148         else if (res == BYPASS) {
149                 *sa_idx = -1;
150                 return 1;
151         }
152
153         *sa_idx = res - 1;
154         return 1;
155 }
156
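/*
 * Bulk SP check for outbound traffic: DISCARD packets are freed, BYPASS
 * packets are compacted in place in ip[], and packets matching a protect
 * rule are moved to ipsec[] with their SA index stored in res[].
 */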
157 static inline void
158 check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
159               struct traffic_type *ipsec)
160 {
161         uint32_t i, j, res;
162         struct rte_mbuf *m;
163
164         if (unlikely(sp == NULL || ip->num == 0))
165                 return;
166
167         rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
168                          DEFAULT_MAX_CATEGORIES);
169
170         j = 0;
171         for (i = 0; i < ip->num; i++) {
172                 m = ip->pkts[i];
173                 res = ip->res[i];
174                 if (unlikely(res == DISCARD))
175                         free_pkts(&m, 1);
176                 else if (res == BYPASS)
177                         ip->pkts[j++] = m;
178                 else {
179                         ipsec->res[ipsec->num] = res - 1;
180                         ipsec->pkts[ipsec->num++] = m;
181                 }
182         }
183         ip->num = j;
184 }
185
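/*
 * Bulk SP check for inbound, inline-decrypted traffic: DISCARD packets are
 * freed, BYPASS packets pass through, and packets matching a protect rule
 * must carry an SA pointer in the security dynfield whose SPI matches the
 * SA selected by the rule; survivors are compacted in place in ip[].
 */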
186 static inline void
187 check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
188                  struct traffic_type *ip)
189 {
190         struct ipsec_sa *sa;
191         uint32_t i, j, res;
192         struct rte_mbuf *m;
193
194         if (unlikely(sp == NULL || ip->num == 0))
195                 return;
196
197         rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
198                          DEFAULT_MAX_CATEGORIES);
199
200         j = 0;
201         for (i = 0; i < ip->num; i++) {
202                 m = ip->pkts[i];
203                 res = ip->res[i];
204                 if (unlikely(res == DISCARD))
205                         free_pkts(&m, 1);
206                 else if (res == BYPASS)
207                         ip->pkts[j++] = m;
208                 else {
209                         sa = *(struct ipsec_sa **)rte_security_dynfield(m);
210                         if (sa == NULL) {
211                                 free_pkts(&m, 1);
212                                 continue;
213                         }
214
215                         /* SPI on the packet should match with the one in SA */
216                         if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
217                                 free_pkts(&m, 1);
218                                 continue;
219                         }
220
221                         ip->pkts[j++] = m;
222                 }
223         }
224         ip->num = j;
225 }
226
227 static inline uint16_t
228 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
229 {
230         uint32_t dst_ip;
231         uint16_t offset;
232         uint32_t hop;
233         int ret;
234
235         offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
236         dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
237         dst_ip = rte_be_to_cpu_32(dst_ip);
238
239         ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
240
241         if (ret == 0) {
242                 /* We have a hit */
243                 return hop;
244         }
245
246         /* else */
247         return RTE_MAX_ETHPORTS;
248 }
249
250 /* TODO: To be tested */
251 static inline uint16_t
252 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
253 {
254         uint8_t dst_ip[16];
255         uint8_t *ip6_dst;
256         uint16_t offset;
257         uint32_t hop;
258         int ret;
259
260         offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
261         ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
262         memcpy(&dst_ip[0], ip6_dst, 16);
263
264         ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
265
266         if (ret == 0) {
267                 /* We have a hit */
268                 return hop;
269         }
270
271         /* else */
272         return RTE_MAX_ETHPORTS;
273 }
274
275 static inline uint16_t
276 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
277 {
278         if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
279                 return route4_pkt(pkt, rt->rt4_ctx);
280         else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
281                 return route6_pkt(pkt, rt->rt6_ctx);
282
283         return RTE_MAX_ETHPORTS;
284 }
285
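/*
 * App mode inbound processing of a single event: classify the packet,
 * verify it against the inbound SP/SA, then route it and prepare it for
 * Tx. Returns PKT_FORWARDED on success, PKT_DROPPED otherwise.
 */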
286 static inline int
287 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
288                 struct rte_event *ev)
289 {
290         struct ipsec_sa *sa = NULL;
291         struct rte_mbuf *pkt;
292         uint16_t port_id = 0;
293         enum pkt_type type;
294         uint32_t sa_idx;
295         uint8_t *nlp;
296
297         /* Get pkt from event */
298         pkt = ev->mbuf;
299
300         /* Check the packet type */
301         type = process_ipsec_get_pkt_type(pkt, &nlp);
302
303         switch (type) {
304         case PKT_TYPE_PLAIN_IPV4:
305                 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
306                         if (unlikely(pkt->ol_flags &
307                                      RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
308                                 RTE_LOG(ERR, IPSEC,
309                                         "Inbound security offload failed\n");
310                                 goto drop_pkt_and_exit;
311                         }
312                         sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
313                 }
314
315                 /* Check if we have a match */
316                 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
317                         /* No valid match */
318                         goto drop_pkt_and_exit;
319                 }
320                 break;
321
322         case PKT_TYPE_PLAIN_IPV6:
323                 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
324                         if (unlikely(pkt->ol_flags &
325                                      RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
326                                 RTE_LOG(ERR, IPSEC,
327                                         "Inbound security offload failed\n");
328                                 goto drop_pkt_and_exit;
329                         }
330                         sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
331                 }
332
333                 /* Check if we have a match */
334                 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
335                         /* No valid match */
336                         goto drop_pkt_and_exit;
337                 }
338                 break;
339
340         default:
341                 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
342                            type);
343                 goto drop_pkt_and_exit;
344         }
345
346         /* Check if the packet has to be bypassed */
347         if (sa_idx == BYPASS)
348                 goto route_and_send_pkt;
349
350         /* Validate sa_idx */
351         if (sa_idx >= ctx->sa_ctx->nb_sa)
352                 goto drop_pkt_and_exit;
353
354         /* Else the packet has to be protected with SA */
355
356         /* If the packet was IPsec processed, then SA pointer should be set */
357         if (sa == NULL)
358                 goto drop_pkt_and_exit;
359
360         /* SPI on the packet should match with the one in SA */
361         if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
362                 goto drop_pkt_and_exit;
363
364 route_and_send_pkt:
365         port_id = get_route(pkt, rt, type);
366         if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
367                 /* no match */
368                 goto drop_pkt_and_exit;
369         }
370         /* else, we have a matching route */
371
372         /* Update mac addresses */
373         update_mac_addrs(pkt, port_id);
374
375         /* Update the event with the dest port */
376         ipsec_event_pre_forward(pkt, port_id);
377         return PKT_FORWARDED;
378
379 drop_pkt_and_exit:
380         RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
381         rte_pktmbuf_free(pkt);
382         ev->mbuf = NULL;
383         return PKT_DROPPED;
384 }
385
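/*
 * App mode outbound processing of a single event: match the packet against
 * the outbound SP, attach the inline protocol session metadata when it has
 * to be protected, then route it and prepare it for Tx.
 */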
386 static inline int
387 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
388                 struct rte_event *ev)
389 {
390         struct rte_ipsec_session *sess;
391         struct sa_ctx *sa_ctx;
392         struct rte_mbuf *pkt;
393         uint16_t port_id = 0;
394         struct ipsec_sa *sa;
395         enum pkt_type type;
396         uint32_t sa_idx;
397         uint8_t *nlp;
398
399         /* Get pkt from event */
400         pkt = ev->mbuf;
401
402         /* Check the packet type */
403         type = process_ipsec_get_pkt_type(pkt, &nlp);
404
405         switch (type) {
406         case PKT_TYPE_PLAIN_IPV4:
407                 /* Check if we have a match */
408                 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
409                         /* No valid match */
410                         goto drop_pkt_and_exit;
411                 }
412                 break;
413         case PKT_TYPE_PLAIN_IPV6:
414                 /* Check if we have a match */
415                 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
416                         /* No valid match */
417                         goto drop_pkt_and_exit;
418                 }
419                 break;
420         default:
421                 /*
422                  * Only plain IPv4 & IPv6 packets are allowed
423                  * on protected port. Drop the rest.
424                  */
425                 RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
426                 goto drop_pkt_and_exit;
427         }
428
429         /* Check if the packet has to be bypassed */
430         if (sa_idx == BYPASS) {
431                 port_id = get_route(pkt, rt, type);
432                 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
433                         /* no match */
434                         goto drop_pkt_and_exit;
435                 }
436                 /* else, we have a matching route */
437                 goto send_pkt;
438         }
439
440         /* Validate sa_idx */
441         if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
442                 goto drop_pkt_and_exit;
443
444         /* Else the packet has to be protected */
445
446         /* Get SA ctx */
447         sa_ctx = ctx->sa_ctx;
448
449         /* Get SA */
450         sa = &(sa_ctx->sa[sa_idx]);
451
452         /* Get IPsec session */
453         sess = ipsec_get_primary_session(sa);
454
455         /* Allow only inline protocol for now */
456         if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
457                 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
458                 goto drop_pkt_and_exit;
459         }
460
461         rte_security_set_pkt_metadata(sess->security.ctx,
462                                       sess->security.ses, pkt, NULL);
463
464         /* Mark the packet for Tx security offload */
465         pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
466
467         /* Get the port to which this pkt needs to be submitted */
468         port_id = sa->portid;
469
470 send_pkt:
471         /* Provide L2 len for Outbound processing */
472         pkt->l2_len = RTE_ETHER_HDR_LEN;
473
474         /* Update mac addresses */
475         update_mac_addrs(pkt, port_id);
476
477         /* Update the event with the dest port */
478         ipsec_event_pre_forward(pkt, port_id);
479         return PKT_FORWARDED;
480
481 drop_pkt_and_exit:
482         RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
483         rte_pktmbuf_free(pkt);
484         ev->mbuf = NULL;
485         return PKT_DROPPED;
486 }
487
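/*
 * Route the classified traffic of an event vector (plain IPv4/IPv6 and
 * packets to be protected), compacting the surviving mbufs at the front
 * of vec->mbufs. Returns the number of packets kept in the vector.
 */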
488 static inline int
489 ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
490                     struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
491 {
492         struct rte_ipsec_session *sess;
493         uint32_t sa_idx, i, j = 0;
494         uint16_t port_id = 0;
495         struct rte_mbuf *pkt;
496         struct ipsec_sa *sa;
497
498         /* Route IPv4 packets */
499         for (i = 0; i < t->ip4.num; i++) {
500                 pkt = t->ip4.pkts[i];
501                 port_id = route4_pkt(pkt, rt->rt4_ctx);
502                 if (port_id != RTE_MAX_ETHPORTS) {
503                         /* Update mac addresses */
504                         update_mac_addrs(pkt, port_id);
505                         /* Update the event with the dest port */
506                         ipsec_event_pre_forward(pkt, port_id);
507                         ev_vector_attr_update(vec, pkt);
508                         vec->mbufs[j++] = pkt;
509                 } else
510                         free_pkts(&pkt, 1);
511         }
512
513         /* Route IPv6 packets */
514         for (i = 0; i < t->ip6.num; i++) {
515                 pkt = t->ip6.pkts[i];
516                 port_id = route6_pkt(pkt, rt->rt6_ctx);
517                 if (port_id != RTE_MAX_ETHPORTS) {
518                         /* Update mac addresses */
519                         update_mac_addrs(pkt, port_id);
520                         /* Update the event with the dest port */
521                         ipsec_event_pre_forward(pkt, port_id);
522                         ev_vector_attr_update(vec, pkt);
523                         vec->mbufs[j++] = pkt;
524                 } else
525                         free_pkts(&pkt, 1);
526         }
527
528         /* Route ESP packets */
529         for (i = 0; i < t->ipsec.num; i++) {
530                 /* Validate sa_idx */
531                 sa_idx = t->ipsec.res[i];
532                 pkt = t->ipsec.pkts[i];
533                 if (unlikely(sa_idx >= sa_ctx->nb_sa))
534                         free_pkts(&pkt, 1);
535                 else {
536                         /* Else the packet has to be protected */
537                         sa = &(sa_ctx->sa[sa_idx]);
538                         /* Get IPsec session */
539                         sess = ipsec_get_primary_session(sa);
540                         /* Allow only inline protocol for now */
541                         if (unlikely(sess->type !=
542                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
543                                 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
544                                 free_pkts(&pkt, 1);
545                                 continue;
546                         }
547                         rte_security_set_pkt_metadata(sess->security.ctx,
548                                                 sess->security.ses, pkt, NULL);
549
550                         pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
551                         port_id = sa->portid;
552                         update_mac_addrs(pkt, port_id);
553                         ipsec_event_pre_forward(pkt, port_id);
554                         ev_vector_attr_update(vec, pkt);
555                         vec->mbufs[j++] = pkt;
556                 }
557         }
558
559         return j;
560 }
561
562 static inline void
563 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
564 {
565         enum pkt_type type;
566         uint8_t *nlp;
567
568         /* Check the packet type */
569         type = process_ipsec_get_pkt_type(pkt, &nlp);
570
571         switch (type) {
572         case PKT_TYPE_PLAIN_IPV4:
573                 t->ip4.data[t->ip4.num] = nlp;
574                 t->ip4.pkts[(t->ip4.num)++] = pkt;
575                 break;
576         case PKT_TYPE_PLAIN_IPV6:
577                 t->ip6.data[t->ip6.num] = nlp;
578                 t->ip6.pkts[(t->ip6.num)++] = pkt;
579                 break;
580         default:
581                 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
582                            type);
583                 free_pkts(&pkt, 1);
584                 break;
585         }
586 }
587
588 static inline int
589 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
590                                 struct rte_event_vector *vec)
591 {
592         struct ipsec_traffic t;
593         struct rte_mbuf *pkt;
594         int i;
595
596         t.ip4.num = 0;
597         t.ip6.num = 0;
598         t.ipsec.num = 0;
599
600         for (i = 0; i < vec->nb_elem; i++) {
601                 /* Get pkt from event */
602                 pkt = vec->mbufs[i];
603
604                 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
605                         if (unlikely(pkt->ol_flags &
606                                      RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
607                                 RTE_LOG(ERR, IPSEC,
608                                         "Inbound security offload failed\n");
609                                 free_pkts(&pkt, 1);
610                                 continue;
611                         }
612                 }
613
614                 classify_pkt(pkt, &t);
615         }
616
617         check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
618         check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
619
620         return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
621 }
622
623 static inline int
624 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
625                                  struct rte_event_vector *vec)
626 {
627         struct ipsec_traffic t;
628         struct rte_mbuf *pkt;
629         uint32_t i;
630
631         t.ip4.num = 0;
632         t.ip6.num = 0;
633         t.ipsec.num = 0;
634
635         for (i = 0; i < vec->nb_elem; i++) {
636                 /* Get pkt from event */
637                 pkt = vec->mbufs[i];
638
639                 classify_pkt(pkt, &t);
640
641                 /* Provide L2 len for Outbound processing */
642                 pkt->l2_len = RTE_ETHER_HDR_LEN;
643         }
644
645         check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
646         check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
647
648         return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
649 }
650
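/*
 * Driver mode outbound processing of an event vector: attach the per-port
 * inline session to each mbuf and drop packets on ports without a session.
 * Returns the number of packets kept in the vector.
 */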
651 static inline int
652 process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
653                                           struct port_drv_mode_data *data)
654 {
655         struct rte_mbuf *pkt;
656         int16_t port_id;
657         uint32_t i;
658         int j = 0;
659
660         for (i = 0; i < vec->nb_elem; i++) {
661                 pkt = vec->mbufs[i];
662                 port_id = pkt->port;
663
664                 if (unlikely(!data[port_id].sess)) {
665                         free_pkts(&pkt, 1);
666                         continue;
667                 }
668                 ipsec_event_pre_forward(pkt, port_id);
669                 /* Save security session */
670                 rte_security_set_pkt_metadata(data[port_id].ctx,
671                                               data[port_id].sess, pkt,
672                                               NULL);
673
674                 /* Mark the packet for Tx security offload */
675                 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
676
677                 /* Provide L2 len for Outbound processing */
678                 pkt->l2_len = RTE_ETHER_HDR_LEN;
679
680                 vec->mbufs[j++] = pkt;
681         }
682
683         return j;
684 }
685
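/*
 * App mode handling of a vector event: process it as inbound or outbound
 * depending on the Rx port, then either enqueue the trimmed vector to the
 * Tx adapter or return it to its mempool when no packet survived.
 */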
686 static inline void
687 ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
688                         struct eh_event_link_info *links,
689                         struct rte_event *ev)
690 {
691         struct rte_event_vector *vec = ev->vec;
692         struct rte_mbuf *pkt;
693         int ret;
694
695         pkt = vec->mbufs[0];
696
697         ev_vector_attr_init(vec);
698         if (is_unprotected_port(pkt->port))
699                 ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
700                                                       &lconf->rt, vec);
701         else
702                 ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
703                                                        &lconf->rt, vec);
704
705         if (likely(ret > 0)) {
706                 vec->nb_elem = ret;
707                 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
708                                                  links[0].event_port_id,
709                                                  ev, 1, 0);
710         } else {
711                 rte_mempool_put(rte_mempool_from_obj(vec), vec);
712         }
713 }
714
715 static inline void
716 ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
717                                  struct rte_event *ev,
718                                  struct port_drv_mode_data *data)
719 {
720         struct rte_event_vector *vec = ev->vec;
721         struct rte_mbuf *pkt;
722
723         pkt = vec->mbufs[0];
724
725         if (!is_unprotected_port(pkt->port))
726                 vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
727                                                                          data);
728         if (vec->nb_elem > 0)
729                 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
730                                                  links[0].event_port_id,
731                                                  ev, 1, 0);
732         else
733                 rte_mempool_put(rte_mempool_from_obj(vec), vec);
734 }
735
736 /*
737  * Event mode exposes several worker variants depending on the
738  * capabilities of the event device and the operating mode
739  * selected.
740  */
741
742 static void
743 ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
744                        void *args __rte_unused)
745 {
746         rte_pktmbuf_free(ev.mbuf);
747 }
748
749 /* Workers registered */
750 #define IPSEC_EVENTMODE_WORKERS         2
751
752 /*
753  * Event mode worker
754  * Operating parameters : non-burst - Tx internal port - driver mode
755  */
756 static void
757 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
758                 uint8_t nb_links)
759 {
760         struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
761         unsigned int nb_rx = 0, nb_tx;
762         struct rte_mbuf *pkt;
763         struct rte_event ev;
764         uint32_t lcore_id;
765         int32_t socket_id;
766         int16_t port_id;
767
768         /* Check if we have links registered for this lcore */
769         if (nb_links == 0) {
770                 /* No links registered - exit */
771                 return;
772         }
773
774         memset(data, 0, sizeof(data));
775
776         /* Get core ID */
777         lcore_id = rte_lcore_id();
778
779         /* Get socket ID */
780         socket_id = rte_lcore_to_socket_id(lcore_id);
781
782         /*
783          * Prepare the security sessions table. In outbound driver mode
784          * we always use the first session configured for a given port.
785          */
786         prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
787                                  RTE_MAX_ETHPORTS);
788
789         RTE_LOG(INFO, IPSEC,
790                 "Launching event mode worker (non-burst - Tx internal port - "
791                 "driver mode) on lcore %d\n", lcore_id);
792
793         /* We have valid links */
794
795         /* Check if it's single link */
796         if (nb_links != 1) {
797                 RTE_LOG(INFO, IPSEC,
798                         "Multiple links not supported. Using first link\n");
799         }
800
801         RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
802                         links[0].event_port_id);
803         while (!force_quit) {
804                 /* Read packet from event queues */
805                 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
806                                 links[0].event_port_id,
807                                 &ev,    /* events */
808                                 1,      /* nb_events */
809                                 0       /* timeout_ticks */);
810
811                 if (nb_rx == 0)
812                         continue;
813
814                 switch (ev.event_type) {
815                 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
816                 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
817                         ipsec_ev_vector_drv_mode_process(links, &ev, data);
818                         continue;
819                 case RTE_EVENT_TYPE_ETHDEV:
820                         break;
821                 default:
822                         RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
823                                 ev.event_type);
824                         continue;
825                 }
826
827                 pkt = ev.mbuf;
828                 port_id = pkt->port;
829
830                 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
831
832                 /* Process packet */
833                 ipsec_event_pre_forward(pkt, port_id);
834
835                 if (!is_unprotected_port(port_id)) {
836
837                         if (unlikely(!data[port_id].sess)) {
838                                 rte_pktmbuf_free(pkt);
839                                 continue;
840                         }
841
842                         /* Save security session */
843                         rte_security_set_pkt_metadata(data[port_id].ctx,
844                                                       data[port_id].sess, pkt,
845                                                       NULL);
846
847                         /* Mark the packet for Tx security offload */
848                         pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
849
850                         /* Provide L2 len for Outbound processing */
851                         pkt->l2_len = RTE_ETHER_HDR_LEN;
852                 }
853
854                 /*
855                  * Since a Tx internal port is available, events can be
856                  * enqueued directly to the adapter, which submits them
857                  * to the eth device internally.
858                  */
859                 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
860                                                          links[0].event_port_id,
861                                                          &ev, /* events */
862                                                          1,   /* nb_events */
863                                                          0 /* flags */);
864                 if (!nb_tx)
865                         rte_pktmbuf_free(ev.mbuf);
866         }
867
868         if (ev.u64) {
869                 ev.op = RTE_EVENT_OP_RELEASE;
870                 rte_event_enqueue_burst(links[0].eventdev_id,
871                                         links[0].event_port_id, &ev, 1);
872         }
873
874         rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
875                                ipsec_event_port_flush, NULL);
876 }
877
878 /*
879  * Event mode worker
880  * Operating parameters : non-burst - Tx internal port - app mode
881  */
882 static void
883 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
884                 uint8_t nb_links)
885 {
886         struct lcore_conf_ev_tx_int_port_wrkr lconf;
887         unsigned int nb_rx = 0, nb_tx;
888         struct rte_event ev;
889         uint32_t lcore_id;
890         int32_t socket_id;
891         int ret;
892
893         /* Check if we have links registered for this lcore */
894         if (nb_links == 0) {
895                 /* No links registered - exit */
896                 return;
897         }
898
899         /* We have valid links */
900
901         /* Get core ID */
902         lcore_id = rte_lcore_id();
903
904         /* Get socket ID */
905         socket_id = rte_lcore_to_socket_id(lcore_id);
906
907         /* Save routing table */
908         lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
909         lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
910         lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
911         lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
912         lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
913         lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
914         lconf.inbound.session_priv_pool =
915                         socket_ctx[socket_id].session_priv_pool;
916         lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
917         lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
918         lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
919         lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
920         lconf.outbound.session_priv_pool =
921                         socket_ctx[socket_id].session_priv_pool;
922
923         RTE_LOG(INFO, IPSEC,
924                 "Launching event mode worker (non-burst - Tx internal port - "
925                 "app mode) on lcore %d\n", lcore_id);
926
927         /* Check if it's single link */
928         if (nb_links != 1) {
929                 RTE_LOG(INFO, IPSEC,
930                         "Multiple links not supported. Using first link\n");
931         }
932
933         RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
934                 links[0].event_port_id);
935
936         while (!force_quit) {
937                 /* Read packet from event queues */
938                 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
939                                 links[0].event_port_id,
940                                 &ev,     /* events */
941                                 1,       /* nb_events */
942                                 0        /* timeout_ticks */);
943
944                 if (nb_rx == 0)
945                         continue;
946
947                 switch (ev.event_type) {
948                 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
949                 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
950                         ipsec_ev_vector_process(&lconf, links, &ev);
951                         continue;
952                 case RTE_EVENT_TYPE_ETHDEV:
953                         break;
954                 default:
955                         RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
956                                 ev.event_type);
957                         continue;
958                 }
959
960                 if (is_unprotected_port(ev.mbuf->port))
961                         ret = process_ipsec_ev_inbound(&lconf.inbound,
962                                                         &lconf.rt, &ev);
963                 else
964                         ret = process_ipsec_ev_outbound(&lconf.outbound,
965                                                         &lconf.rt, &ev);
966                 if (ret != 1)
967                         /* The pkt has been dropped */
968                         continue;
969
970                 /*
971                  * Since a Tx internal port is available, events can be
972                  * enqueued directly to the adapter, which submits them
973                  * to the eth device internally.
974                  */
975                 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
976                                                          links[0].event_port_id,
977                                                          &ev, /* events */
978                                                          1,   /* nb_events */
979                                                          0 /* flags */);
980                 if (!nb_tx)
981                         rte_pktmbuf_free(ev.mbuf);
982         }
983
984         if (ev.u64) {
985                 ev.op = RTE_EVENT_OP_RELEASE;
986                 rte_event_enqueue_burst(links[0].eventdev_id,
987                                         links[0].event_port_id, &ev, 1);
988         }
989
990         rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
991                                ipsec_event_port_flush, NULL);
992 }
993
994 static uint8_t
995 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
996 {
997         struct eh_app_worker_params *wrkr;
998         uint8_t nb_wrkr_param = 0;
999
1000         /* Save workers */
1001         wrkr = wrkrs;
1002
1003         /* Non-burst - Tx internal port - driver mode */
1004         wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1005         wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1006         wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1007         wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
1008         wrkr++;
1009         nb_wrkr_param++;
1010
1011         /* Non-burst - Tx internal port - app mode */
1012         wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1013         wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1014         wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1015         wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
1016         nb_wrkr_param++;
1017
1018         return nb_wrkr_param;
1019 }
1020
1021 static void
1022 ipsec_eventmode_worker(struct eh_conf *conf)
1023 {
1024         struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
1025                                         {{{0} }, NULL } };
1026         uint8_t nb_wrkr_param;
1027
1028         /* Populate ipsec_wrkr params */
1029         nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
1030
1031         /*
1032          * Launch correct worker after checking
1033          * the event device's capabilities.
1034          */
1035         eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
1036 }
1037
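/*
 * Outbound SPD processing for inline protocol SAs: run the SP ACL over
 * ip[], free DISCARD packets, pass BYPASS packets straight through, and
 * group consecutive packets using the same SA so rte_ipsec_pkt_process()
 * can prepare them in bursts. Processed packets land in match[] or
 * mismatch[] depending on whether the SA's outer header matches the IP
 * version indicated by match_flag.
 */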
1038 static __rte_always_inline void
1039 outb_inl_pro_spd_process(struct sp_ctx *sp,
1040                          struct sa_ctx *sa_ctx,
1041                          struct traffic_type *ip,
1042                          struct traffic_type *match,
1043                          struct traffic_type *mismatch,
1044                          bool match_flag,
1045                          struct ipsec_spd_stats *stats)
1046 {
1047         uint32_t prev_sa_idx = UINT32_MAX;
1048         struct rte_mbuf *ipsec[MAX_PKT_BURST];
1049         struct rte_ipsec_session *ips;
1050         uint32_t i, j, j_mis, sa_idx;
1051         struct ipsec_sa *sa = NULL;
1052         uint32_t ipsec_num = 0;
1053         struct rte_mbuf *m;
1054         uint64_t satp;
1055
1056         if (ip->num == 0 || sp == NULL)
1057                 return;
1058
1059         rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
1060                         ip->num, DEFAULT_MAX_CATEGORIES);
1061
1062         j = match->num;
1063         j_mis = mismatch->num;
1064
1065         for (i = 0; i < ip->num; i++) {
1066                 m = ip->pkts[i];
1067                 sa_idx = ip->res[i] - 1;
1068
1069                 if (unlikely(ip->res[i] == DISCARD)) {
1070                         free_pkts(&m, 1);
1071
1072                         stats->discard++;
1073                 } else if (unlikely(ip->res[i] == BYPASS)) {
1074                         match->pkts[j++] = m;
1075
1076                         stats->bypass++;
1077                 } else {
1078                         if (prev_sa_idx == UINT32_MAX) {
1079                                 prev_sa_idx = sa_idx;
1080                                 sa = &sa_ctx->sa[sa_idx];
1081                                 ips = ipsec_get_primary_session(sa);
1082                                 satp = rte_ipsec_sa_type(ips->sa);
1083                         }
1084
1085                         if (sa_idx != prev_sa_idx) {
1086                                 prep_process_group(sa, ipsec, ipsec_num);
1087
1088                                 /* Prepare packets for outbound */
1089                                 rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1090
1091                                 /* Copy to the matching or the mismatching traffic type */
1092                                 if (SATP_OUT_IPV4(satp) == match_flag) {
1093                                         memcpy(&match->pkts[j], ipsec,
1094                                                ipsec_num * sizeof(void *));
1095                                         j += ipsec_num;
1096                                 } else {
1097                                         memcpy(&mismatch->pkts[j_mis], ipsec,
1098                                                ipsec_num * sizeof(void *));
1099                                         j_mis += ipsec_num;
1100                                 }
1101
1102                                 /* Update to new SA */
1103                                 sa = &sa_ctx->sa[sa_idx];
1104                                 ips = ipsec_get_primary_session(sa);
1105                                 satp = rte_ipsec_sa_type(ips->sa);
1106                                 ipsec_num = 0;
1107                         }
1108
1109                         ipsec[ipsec_num++] = m;
1110                         stats->protect++;
1111                 }
1112         }
1113
1114         if (ipsec_num) {
1115                 prep_process_group(sa, ipsec, ipsec_num);
1116
1117                 /* Prepare packets for outbound */
1118                 rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1119
1120                 /* Copy to the matching or the mismatching traffic type */
1121                 if (SATP_OUT_IPV4(satp) == match_flag) {
1122                         memcpy(&match->pkts[j], ipsec,
1123                                ipsec_num * sizeof(void *));
1124                         j += ipsec_num;
1125                 } else {
1126                         memcpy(&mismatch->pkts[j_mis], ipsec,
1127                                ipsec_num * sizeof(void *));
1128                         j_mis += ipsec_num;
1129                 }
1130         }
1131         match->num = j;
1132         mismatch->num = j_mis;
1133 }
1134
1135 /* Poll mode worker when all SAs are of type inline protocol */
1136 void
1137 ipsec_poll_mode_wrkr_inl_pr(void)
1138 {
1139         const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1140                         / US_PER_S * BURST_TX_DRAIN_US;
1141         struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
1142         struct rte_mbuf *pkts[MAX_PKT_BURST];
1143         uint64_t prev_tsc, diff_tsc, cur_tsc;
1144         struct ipsec_core_statistics *stats;
1145         struct rt_ctx *rt4_ctx, *rt6_ctx;
1146         struct sa_ctx *sa_in, *sa_out;
1147         struct traffic_type ip4, ip6;
1148         struct lcore_rx_queue *rxql;
1149         struct rte_mbuf **v4, **v6;
1150         struct ipsec_traffic trf;
1151         struct lcore_conf *qconf;
1152         uint16_t v4_num, v6_num;
1153         int32_t socket_id;
1154         uint32_t lcore_id;
1155         int32_t i, nb_rx;
1156         uint16_t portid;
1157         uint8_t queueid;
1158
1159         prev_tsc = 0;
1160         lcore_id = rte_lcore_id();
1161         qconf = &lcore_conf[lcore_id];
1162         rxql = qconf->rx_queue_list;
1163         socket_id = rte_lcore_to_socket_id(lcore_id);
1164         stats = &core_statistics[lcore_id];
1165
1166         rt4_ctx = socket_ctx[socket_id].rt_ip4;
1167         rt6_ctx = socket_ctx[socket_id].rt_ip6;
1168
1169         sp4_in = socket_ctx[socket_id].sp_ip4_in;
1170         sp6_in = socket_ctx[socket_id].sp_ip6_in;
1171         sa_in = socket_ctx[socket_id].sa_in;
1172
1173         sp4_out = socket_ctx[socket_id].sp_ip4_out;
1174         sp6_out = socket_ctx[socket_id].sp_ip6_out;
1175         sa_out = socket_ctx[socket_id].sa_out;
1176
1177         qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1178
1179         if (qconf->nb_rx_queue == 0) {
1180                 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1181                         lcore_id);
1182                 return;
1183         }
1184
1185         RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1186
1187         for (i = 0; i < qconf->nb_rx_queue; i++) {
1188                 portid = rxql[i].port_id;
1189                 queueid = rxql[i].queue_id;
1190                 RTE_LOG(INFO, IPSEC,
1191                         " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1192                         lcore_id, portid, queueid);
1193         }
1194
1195         while (!force_quit) {
1196                 cur_tsc = rte_rdtsc();
1197
1198                 /* TX queue buffer drain */
1199                 diff_tsc = cur_tsc - prev_tsc;
1200
1201                 if (unlikely(diff_tsc > drain_tsc)) {
1202                         drain_tx_buffers(qconf);
1203                         prev_tsc = cur_tsc;
1204                 }
1205
1206                 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1207                         /* Read packets from RX queues */
1208                         portid = rxql[i].port_id;
1209                         queueid = rxql[i].queue_id;
1210                         nb_rx = rte_eth_rx_burst(portid, queueid,
1211                                         pkts, MAX_PKT_BURST);
1212
1213                         if (nb_rx <= 0)
1214                                 continue;
1215
1216                         core_stats_update_rx(nb_rx);
1217
1218                         prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
1219
1220                         /* Drop any IPsec traffic */
1221                         free_pkts(trf.ipsec.pkts, trf.ipsec.num);
1222
1223                         if (is_unprotected_port(portid)) {
1224                                 inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
1225                                               trf.ip4.num,
1226                                               &stats->inbound.spd4);
1227
1228                                 inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
1229                                               trf.ip6.num,
1230                                               &stats->inbound.spd6);
1231
1232                                 v4 = trf.ip4.pkts;
1233                                 v4_num = trf.ip4.num;
1234                                 v6 = trf.ip6.pkts;
1235                                 v6_num = trf.ip6.num;
1236                         } else {
1237                                 ip4.num = 0;
1238                                 ip6.num = 0;
1239
1240                                 outb_inl_pro_spd_process(sp4_out, sa_out,
1241                                                          &trf.ip4, &ip4, &ip6,
1242                                                          true,
1243                                                          &stats->outbound.spd4);
1244
1245                                 outb_inl_pro_spd_process(sp6_out, sa_out,
1246                                                          &trf.ip6, &ip6, &ip4,
1247                                                          false,
1248                                                          &stats->outbound.spd6);
1249                                 v4 = ip4.pkts;
1250                                 v4_num = ip4.num;
1251                                 v6 = ip6.pkts;
1252                                 v6_num = ip6.num;
1253                         }
1254
1255                         route4_pkts(rt4_ctx, v4, v4_num, 0, false);
1256                         route6_pkts(rt6_ctx, v6, v6_num);
1257                 }
1258         }
1259 }
1260
1261 /* Poll mode worker when all SAs are of type inline protocol
1262  * and single SA mode is enabled.
1263  */
1264 void
1265 ipsec_poll_mode_wrkr_inl_pr_ss(void)
1266 {
1267         const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1268                         / US_PER_S * BURST_TX_DRAIN_US;
1269         uint16_t sa_out_portid = 0, sa_out_proto = 0;
1270         struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
1271         uint64_t prev_tsc, diff_tsc, cur_tsc;
1272         struct rte_ipsec_session *ips = NULL;
1273         struct lcore_rx_queue *rxql;
1274         struct ipsec_sa *sa = NULL;
1275         struct lcore_conf *qconf;
1276         struct sa_ctx *sa_out;
1277         uint32_t i, nb_rx, j;
1278         int32_t socket_id;
1279         uint32_t lcore_id;
1280         uint16_t portid;
1281         uint8_t queueid;
1282
1283         prev_tsc = 0;
1284         lcore_id = rte_lcore_id();
1285         qconf = &lcore_conf[lcore_id];
1286         rxql = qconf->rx_queue_list;
1287         socket_id = rte_lcore_to_socket_id(lcore_id);
1288
1289         /* Get SA info */
1290         sa_out = socket_ctx[socket_id].sa_out;
1291         if (sa_out && single_sa_idx < sa_out->nb_sa) {
1292                 sa = &sa_out->sa[single_sa_idx];
1293                 ips = ipsec_get_primary_session(sa);
1294                 sa_out_portid = sa->portid;
1295                 if (sa->flags & IP6_TUNNEL)
1296                         sa_out_proto = IPPROTO_IPV6;
1297                 else
1298                         sa_out_proto = IPPROTO_IP;
1299         }
1300
1301         qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1302
1303         if (qconf->nb_rx_queue == 0) {
1304                 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1305                         lcore_id);
1306                 return;
1307         }
1308
1309         RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1310
1311         for (i = 0; i < qconf->nb_rx_queue; i++) {
1312                 portid = rxql[i].port_id;
1313                 queueid = rxql[i].queue_id;
1314                 RTE_LOG(INFO, IPSEC,
1315                         " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1316                         lcore_id, portid, queueid);
1317         }
1318
1319         while (!force_quit) {
1320                 cur_tsc = rte_rdtsc();
1321
1322                 /* TX queue buffer drain */
1323                 diff_tsc = cur_tsc - prev_tsc;
1324
1325                 if (unlikely(diff_tsc > drain_tsc)) {
1326                         drain_tx_buffers(qconf);
1327                         prev_tsc = cur_tsc;
1328                 }
1329
1330                 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1331                         /* Read packets from RX queues */
1332                         portid = rxql[i].port_id;
1333                         queueid = rxql[i].queue_id;
1334                         nb_rx = rte_eth_rx_burst(portid, queueid,
1335                                                  pkts, MAX_PKT_BURST);
1336
1337                         if (nb_rx <= 0)
1338                                 continue;
1339
1340                         core_stats_update_rx(nb_rx);
1341
1342                         if (is_unprotected_port(portid)) {
1343                                 /* Nothing much to do for inbound inline
1344                                  * decrypted traffic.
1345                                  */
1346                                 for (j = 0; j < nb_rx; j++) {
1347                                         uint32_t ptype, proto;
1348
1349                                         pkt = pkts[j];
1350                                         ptype = pkt->packet_type &
1351                                                 RTE_PTYPE_L3_MASK;
1352                                         if (ptype == RTE_PTYPE_L3_IPV4)
1353                                                 proto = IPPROTO_IP;
1354                                         else
1355                                                 proto = IPPROTO_IPV6;
1356
1357                                         send_single_packet(pkt, portid, proto);
1358                                 }
1359
1360                                 continue;
1361                         }
1362
1363                         /* Free packets if there are no outbound sessions */
1364                         if (unlikely(!ips)) {
1365                                 rte_pktmbuf_free_bulk(pkts, nb_rx);
1366                                 continue;
1367                         }
1368
1369                         rte_ipsec_pkt_process(ips, pkts, nb_rx);
1370
1371                         /* Send pkts out */
1372                         for (j = 0; j < nb_rx; j++) {
1373                                 pkt = pkts[j];
1374
1375                                 pkt->l2_len = RTE_ETHER_HDR_LEN;
1376                                 send_single_packet(pkt, sa_out_portid,
1377                                                    sa_out_proto);
1378                         }
1379                 }
1380         }
1381 }
1382
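/*
 * Select the poll mode worker to run on this lcore: when library (rte_ipsec)
 * SA processing is enabled, a specialized variant is picked from wrkr_flags,
 * otherwise the generic ipsec_poll_mode_worker handles all cases.
 */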
1383 static void
1384 ipsec_poll_mode_wrkr_launch(void)
1385 {
1386         static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
1387                 [INL_PR_F]        = ipsec_poll_mode_wrkr_inl_pr,
1388                 [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
1389         };
1390         ipsec_worker_fn_t fn;
1391
1392         if (!app_sa_prm.enable) {
1393                 fn = ipsec_poll_mode_worker;
1394         } else {
1395                 fn = poll_mode_wrkrs[wrkr_flags];
1396
1397                 /* Fall back to the generic worker that handles all modes */
1398                 if (!fn)
1399                         fn = ipsec_poll_mode_worker;
1400         }
1401
1402         /* Launch worker */
1403         (*fn)();
1404 }
1405
1406 int ipsec_launch_one_lcore(void *args)
1407 {
1408         struct eh_conf *conf;
1409
1410         conf = (struct eh_conf *)args;
1411
1412         if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
1413                 /* Run in poll mode */
1414                 ipsec_poll_mode_wrkr_launch();
1415         } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
1416                 /* Run in event mode */
1417                 ipsec_eventmode_worker(conf);
1418         }
1419         return 0;
1420 }