/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

#define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
#define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)

/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};

/*
 * Helper routine: fill the internal crypto_xform structure.
 */
static int
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	if (xf == NULL)
		return -EINVAL;

	xfn = xf->next;

	/* for AEAD just one xform required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xfn != NULL)
			return -EINVAL;
		xform->aead = &xf->aead;
	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;

	} else {

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}

	return 0;
}
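
/*
 * Informative example (a sketch, not part of the library): for an
 * outbound SA with AES-CBC + HMAC-SHA1 the application chains the
 * xforms in CIPHER -> AUTH order before calling rte_ipsec_sa_init():
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *	};
 *	struct rte_crypto_sym_xform ciph_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.next = &auth_xf,
 *	};
 *	prm.crypto_xform = &ciph_xf;
 *
 * For an inbound SA the order is reversed (AUTH -> CIPHER); for AEAD
 * (e.g. AES-GCM) a single xform with .next == NULL is used instead.
 */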

uint64_t
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
	return sa->type;
}

/**
 * Based on the number of buckets, calculate the required size for the
 * structure that holds the replay window and sequence number (RSN)
 * information.
 */
static size_t
rsn_size(uint32_t nb_bucket)
{
	size_t sz;
	struct replay_sqn *rsn;

	sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
	sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
	return sz;
}

/*
 * For a given window size, calculate the required number of buckets.
 */
static uint32_t
replay_num_bucket(uint32_t wsz)
{
	uint32_t nb;

	nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
		WINDOW_BUCKET_SIZE);
	nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);

	return nb;
}
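
/*
 * Worked example (informative, assuming WINDOW_BUCKET_SIZE == 64, i.e.
 * one bit per packet in a 64-bit bucket): for wsz == 100,
 * RTE_ALIGN_MUL_CEIL(100, 64) == 128, 128 / 64 == 2 and
 * rte_align32pow2(2) == 2, so two buckets cover the replay window.
 */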

static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	uint32_t n, sz, wsz;

	wsz = *wnd_sz;
	n = 0;

	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as the minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we have a window of at least 64 when ESN is
		 * enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
		if (wsz != 0)
			n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	*wnd_sz = wsz;
	*nb_bucket = n;

	sz = rsn_size(n);
	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
	return sz;
}

void
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}

/*
 * Determine expected SA type based on input parameters.
 */
static int
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	uint64_t tp;

	tp = 0;

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;
	else
		return -EINVAL;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;
	else
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
		else
			return -EINVAL;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else
		return -EINVAL;

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* check for ECN flag */
	if (prm->ipsec_xform.options.ecn == 0)
		tp |= RTE_IPSEC_SATP_ECN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ECN_ENABLE;

	/* check for DSCP flag */
	if (prm->ipsec_xform.options.copy_dscp == 0)
		tp |= RTE_IPSEC_SATP_DSCP_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_DSCP_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;

	*type = tp;
	return 0;
}
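
/*
 * Informative example: an ingress ESP SA in IPv4 tunnel mode
 * (tun.next_proto == IPPROTO_IPIP) with ESN disabled would yield
 * RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_IB |
 * RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
 * RTE_IPSEC_SATP_ESN_DISABLE, plus the ECN/DSCP/SQN bits selected by
 * the remaining options above.
 */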

/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may differ when support for new algorithms is added */
	sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;

	/*
	 * for AEAD and NULL algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_NULL:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = 0;
		sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
		sa->cofs.ofs.cipher.tail = sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
}
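
/*
 * Worked example (informative): for AES-GCM with iv_len == 8 and
 * icv_len == 16, cipher.offset = sizeof(struct rte_esp_hdr) + 8 == 16
 * and cipher.length = 16 + 16 == 32; as AES-GCM is an AEAD algorithm,
 * the auth offset/length simply mirror the cipher ones.
 */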

/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}

/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
{
	uint8_t algo_type;

	sa->sqn.outb = 1;

	algo_type = sa->algo_type;

	/*
	 * Setup auth and cipher length and offset.
	 * These params may differ when support for new algorithms is added.
	 */

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CTR:
	case ALGO_TYPE_NULL:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
		break;
	}

	/*
	 * for AEAD and NULL algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_NULL:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = hlen;
		sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
			sa->iv_len + sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
			(sa->ctp.cipher.offset + sa->ctp.cipher.length);
}
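
/*
 * Worked example (informative): AES-GCM in tunnel mode with a 20B IPv4
 * outer header (hlen == 20) and iv_len == 8 gives
 * cipher.offset = 20 + 8 + 8 == 36 and cipher.length == 0 (the actual
 * payload length is added per packet); the auth fields mirror the
 * cipher ones, so both cofs head and tail adjustments end up as 0.
 */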

/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;

	/* update l2_len and l3_len fields for outbound mbuf */
	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);

	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);

	esp_outb_init(sa, sa->hdr_len);
}

/*
 * Helper function: initialize the SA structure.
 */
static int
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	if (prm->ipsec_xform.options.ecn)
		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;

	if (prm->ipsec_xform.options.copy_dscp)
		sa->tos_mask |= RTE_IPV4_HDR_DSCP_MASK;

	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			/* RFC 4106 */
			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;
			break;
		default:
			return -EINVAL;
		}
	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;
		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->iv_len = 0;
			sa->algo_type = ALGO_TYPE_NULL;
			break;

		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			/* RFC 3686 */
			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;
			break;

		case RTE_CRYPTO_CIPHER_3DES_CBC:
			/* RFC 1851 */
			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;
			break;

		default:
			return -EINVAL;
		}
	}

	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	/* preserve all values except l2_len and l3_len */
	sa->tx_offload.msk =
		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
				0, 0, 0, 0, 0);

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0);
		break;
	}

	return 0;
}

/*
 * Helper function: initialize the SA replay structure.
 */
static void
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
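
/*
 * Resulting layout (informative): the replay structure(s) live in the
 * same allocation, right after the SA itself:
 *
 *	[ struct rte_ipsec_sa | rsn[0] | rsn[1] (SQN_ATOM only) ]
 *
 * For RTE_IPSEC_SATP_SQN_ATOM two RSN copies are kept, roughly so that
 * readers can keep using one copy while the other is being updated.
 */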

int
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	uint64_t type;
	uint32_t nb, wsz;
	int32_t rc;

	if (prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}

int
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	int32_t rc, sz;
	uint32_t nb, wsz;
	uint64_t type;
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			prm->tun.hdr_len > sizeof(sa->hdr))
		return -EINVAL;

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	/* initialize SA */

	memset(sa, 0, sz);
	sa->type = type;
	sa->size = sz;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0) {
		rte_ipsec_sa_fini(sa);
		return rc;
	}

	/* fill replay window related fields */
	if (nb != 0)
		fill_sa_replay(sa, wsz, nb);

	return sz;
}
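
/*
 * Typical usage sketch (informative, error handling trimmed):
 *
 *	struct rte_ipsec_sa_prm prm;
 *	struct rte_ipsec_sa *sa;
 *	int sz;
 *
 *	... fill prm: ipsec_xform, crypto_xform, tun/trs fields ...
 *	sz = rte_ipsec_sa_size(&prm);
 *	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	sz = rte_ipsec_sa_init(sa, &prm, sz);
 *
 * The buffer must stay valid for the whole SA lifetime and is released
 * by the caller after rte_ipsec_sa_fini(sa).
 */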

/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
static inline void
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		sop->m_src = mb[i];
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

/*
 * Setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW only has to prepare the crypto op.
 */
static uint16_t
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	lksd_proto_cop_prepare(ss, mb, cop, num);
	return num;
}

/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD,
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, k;
	uint32_t dr[num];

	RTE_SET_USED(ss);

	k = 0;
	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
			k++;
		else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
static int
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

static int
cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_outb_tun_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_outb_trs_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
static int
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select the packet processing function for a given session, based on
 * the SA parameters and the type of device associated with the session.
 */
int
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	rc = 0;
	pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };

	switch (ss->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = inline_proto_outb_pkt_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare.async = lksd_proto_prepare;
		pf->process = pkt_flag_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		rc = cpu_crypto_pkt_func_select(sa, pf);
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}