ipsec: support setting initial ESN value
lib/ipsec/sa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

#define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
#define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)

/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};

/*
 * helper routine, fills internal crypto_xform structure.
 */
static int
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	if (xf == NULL)
		return -EINVAL;

	xfn = xf->next;

	/* for AEAD just one xform required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xfn != NULL)
			return -EINVAL;
		xform->aead = &xf->aead;

	/* GMAC has only auth */
	} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xf->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xfn != NULL)
			return -EINVAL;
		xform->auth = &xf->auth;
		/* xfn is NULL here; xform->cipher stays NULL (zeroed above) */

	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;

	} else {

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}

	return 0;
}
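
/*
 * Illustrative sketch (not part of the library): for an outbound SA the
 * caller is expected to chain the xforms CIPHER first, then AUTH, e.g.
 * with hypothetical algorithm choices:
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.next = NULL,
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.next = &auth_xf,
 *	};
 *	prm.crypto_xform = &cipher_xf;
 *
 * For an inbound SA the same two xforms would be chained AUTH first.
 */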

uint64_t
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
	return sa->type;
}

/**
 * Based on the number of buckets, calculate the required size for the
 * structure that holds replay window and sequence number (RSN) information.
 */
static size_t
rsn_size(uint32_t nb_bucket)
{
	size_t sz;
	struct replay_sqn *rsn;

	sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
	sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
	return sz;
}

/*
 * for given size, calculate required number of buckets.
 */
static uint32_t
replay_num_bucket(uint32_t wsz)
{
	uint32_t nb;

	nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
		WINDOW_BUCKET_SIZE);
	nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);

	return nb;
}
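
/*
 * Worked example (assuming WINDOW_BUCKET_SIZE of 64 bits per bucket):
 * for a requested window of 100 packets,
 * RTE_ALIGN_MUL_CEIL(100, 64) / 64 = 2 buckets, already a power of two,
 * which is then clamped up to at least WINDOW_BUCKET_MIN.
 */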

static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	uint32_t n, sz, wsz;

	wsz = *wnd_sz;
	n = 0;

	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we have at least a 64-entry window when ESN
		 * is enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
		if (wsz != 0)
			n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	*wnd_sz = wsz;
	*nb_bucket = n;

	sz = rsn_size(n);
	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
	return sz;
}

void
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}

/*
 * Determine expected SA type based on input parameters.
 */
static int
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	uint64_t tp;

	tp = 0;

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;
	else
		return -EINVAL;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;
	else
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
		else
			return -EINVAL;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else
		return -EINVAL;

	/* check for UDP encapsulation flag */
	if (prm->ipsec_xform.options.udp_encap == 1)
		tp |= RTE_IPSEC_SATP_NATT_ENABLE;

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* check for ECN flag */
	if (prm->ipsec_xform.options.ecn == 0)
		tp |= RTE_IPSEC_SATP_ECN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ECN_ENABLE;

	/* check for DSCP flag */
	if (prm->ipsec_xform.options.copy_dscp == 0)
		tp |= RTE_IPSEC_SATP_DSCP_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_DSCP_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;

	*type = tp;
	return 0;
}
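
/*
 * Example: for an ingress ESP SA in IPv4 tunnel mode carrying IPv4
 * traffic (next_proto == IPPROTO_IPIP), with ESN on and no UDP
 * encapsulation, the resulting type carries RTE_IPSEC_SATP_PROTO_ESP |
 * RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4 |
 * RTE_IPSEC_SATP_IPV4 | RTE_IPSEC_SATP_ESN_ENABLE, plus the
 * ECN/DSCP/SQN bits selected above.
 */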

/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may differ when new algorithms are added */
	sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;

	/*
	 * for AEAD algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CCM:
	case ALGO_TYPE_CHACHA20_POLY1305:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = 0;
		sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
		sa->cofs.ofs.cipher.tail = sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
}
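
/*
 * Worked example (AES-GCM, 8-byte IV, 16-byte ICV): ctp.cipher.offset =
 * sizeof(struct rte_esp_hdr) + 8 = 16 and ctp.cipher.length = 16 + 16 = 32;
 * for AEAD the auth offsets mirror the cipher ones, so
 * cofs.ofs.cipher.head = 16 - 16 = 0 and tail stays 0.
 */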

/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}

/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn)
{
	uint8_t algo_type;

	sa->sqn.outb = sqn > 1 ? sqn : 1;

	algo_type = sa->algo_type;

	/*
	 * Setup auth and cipher length and offset.
	 * These params may differ when new algorithms are added.
	 */

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CCM:
	case ALGO_TYPE_CHACHA20_POLY1305:
	case ALGO_TYPE_AES_CTR:
	case ALGO_TYPE_NULL:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
		break;
	case ALGO_TYPE_AES_GMAC:
		sa->ctp.cipher.offset = 0;
		sa->ctp.cipher.length = 0;
		break;
	}

	/*
	 * for AEAD algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CCM:
	case ALGO_TYPE_CHACHA20_POLY1305:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = hlen;
		sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
			sa->iv_len + sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
			(sa->ctp.cipher.offset + sa->ctp.cipher.length);
}
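
/*
 * Worked example (transport mode, hlen = 0, AES-CBC with a 16-byte IV,
 * ESN on so sqh_len = 4): cipher.offset = 8, cipher.length = 16,
 * auth.offset = 0, auth.length = 8 + 16 + 4 = 28, giving
 * cofs.ofs.cipher.head = 8 and cofs.ofs.cipher.tail = 28 - 24 = 4.
 */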

/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;

	memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);

	/* insert UDP header if UDP encapsulation is enabled */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
				&sa->hdr[prm->tun.hdr_len];
		sa->hdr_len += sizeof(struct rte_udp_hdr);
		udph->src_port = prm->ipsec_xform.udp.sport;
		udph->dst_port = prm->ipsec_xform.udp.dport;
		udph->dgram_cksum = 0;
	}

	/* update l2_len and l3_len fields for outbound mbuf */
	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);

	esp_outb_init(sa, sa->hdr_len, prm->ipsec_xform.esn.value);
}
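
/*
 * Example: a bare IPv4 tunnel header (hdr_l3_off = 0, hdr_len = 20)
 * yields l2_len = 0 and l3_len = 20 in tx_offload; with NAT-T the
 * appended UDP header extends that to l3_len = 28.
 */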

/*
 * helper function, init SA structure.
 */
static int
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK |
				RTE_IPSEC_SATP_NATT_MASK;

	if (prm->ipsec_xform.options.ecn)
		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;

	if (prm->ipsec_xform.options.copy_dscp)
		sa->tos_mask |= RTE_IPV4_HDR_DSCP_MASK;

	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			/* RFC 4106 */
			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;
			break;
		case RTE_CRYPTO_AEAD_AES_CCM:
			/* RFC 4309 */
			sa->aad_len = sizeof(struct aead_ccm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_CCM;
			sa->algo_type = ALGO_TYPE_AES_CCM;
			break;
		case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
			/* RFC 7634 & 8439 */
			sa->aad_len = sizeof(struct aead_chacha20_poly1305_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_CHACHA20_POLY1305;
			sa->algo_type = ALGO_TYPE_CHACHA20_POLY1305;
			break;
		default:
			return -EINVAL;
		}
	} else if (cxf->auth->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		/* RFC 4543 */
		/* AES-GMAC is a special case of auth that needs IV */
		sa->pad_align = IPSEC_PAD_AES_GMAC;
		sa->iv_len = sizeof(uint64_t);
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->auth->iv.offset;
		sa->algo_type = ALGO_TYPE_AES_GMAC;

	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->iv_len = 0;
			sa->algo_type = ALGO_TYPE_NULL;
			break;

		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			/* RFC 3686 */
			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;
			break;

		case RTE_CRYPTO_CIPHER_3DES_CBC:
			/* RFC 1851 */
			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;
			break;

		default:
			return -EINVAL;
		}
	}

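	/*
	 * With ESN, the high-order 32 bits of the sequence number are
	 * appended after the payload for ICV computation (RFC 4303) and
	 * stripped afterwards; sqh_len reserves room for that word.
	 */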
	sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	/* preserve all values except l2_len and l3_len */
	sa->tx_offload.msk =
		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
				0, 0, 0, 0, 0);

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4 |
			RTE_IPSEC_SATP_NATT_ENABLE):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6 |
			RTE_IPSEC_SATP_NATT_ENABLE):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS |
			RTE_IPSEC_SATP_NATT_ENABLE):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0, prm->ipsec_xform.esn.value);
		break;
	}

	return 0;
}

/*
 * helper function, init SA replay structure.
 */
static void
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket,
	uint64_t sqn)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	sa->sqn.inb.rsn[0]->sqn = sqn;
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM) {
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
		sa->sqn.inb.rsn[1]->sqn = sqn;
	}
}
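
/*
 * With RTE_IPSEC_SAFLAG_SQN_ATOM two RSN copies are laid out back to
 * back right after the SA (hence REPLAY_SQN_NUM in ipsec_sa_size());
 * the intent, per ipsec_sqn.h, is to let readers keep using one copy
 * while the writer updates the other.
 */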

int
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	uint64_t type;
	uint32_t nb, wsz;
	int32_t rc;

	if (prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}

int
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	int32_t rc, sz;
	uint32_t nb, wsz;
	uint64_t type;
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		uint32_t hlen = prm->tun.hdr_len;
		/* use the computed type: sa is not initialized yet */
		if (type & RTE_IPSEC_SATP_NATT_ENABLE)
			hlen += sizeof(struct rte_udp_hdr);
		if (hlen > sizeof(sa->hdr))
			return -EINVAL;
	}

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	/* initialize SA */

	memset(sa, 0, sz);
	sa->type = type;
	sa->size = sz;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0) {
		rte_ipsec_sa_fini(sa);
		return rc;
	}

	/* fill replay window related fields */
	if (nb != 0)
		fill_sa_replay(sa, wsz, nb, prm->ipsec_xform.esn.value);

	return sz;
}
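
/*
 * Typical usage sketch (illustrative only, error handling elided;
 * rte_zmalloc() comes from rte_malloc.h):
 *
 *	struct rte_ipsec_sa_prm prm = { ... };
 *	int32_t sz = rte_ipsec_sa_size(&prm);
 *	struct rte_ipsec_sa *sa = rte_zmalloc(NULL, sz,
 *		RTE_CACHE_LINE_SIZE);
 *	sz = rte_ipsec_sa_init(sa, &prm, sz);
 */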

/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
static inline void
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		sop->m_src = mb[i];
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW has only to prepare crypto op.
 */
static uint16_t
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	lksd_proto_cop_prepare(ss, mb, cop, num);
	return num;
}

/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD,
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, k, bytes;
	uint32_t dr[num];

	k = 0;
	bytes = 0;
	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			k++;
			bytes += mb[i]->pkt_len;
		} else
			dr[i - k] = i;
	}

	ss->sa->statistics.count += k;
	ss->sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}
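
/*
 * Note: dr[] above collects the indices of failed mbufs; each is stored
 * at position (i - k), i.e. packed from the front in encounter order.
 * move_bad_mbufs() then compacts the successfully processed mbufs to
 * the front of mb[] and moves the failed ones behind them.
 */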

/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
static int
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
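
/*
 * Note: for outbound SAs sqh_len is non-zero only when ESN is enabled;
 * esp_outb_sqh_process then strips the temporary sequence number high
 * word after crypto completes, while pkt_flag_process only checks the
 * mbuf offload flags.
 */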

static int
cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_outb_tun_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_outb_trs_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
static int
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select packet processing function for given session based on SA
 * parameters and the type of device associated with the session.
 */
int
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	rc = 0;
	pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };

	switch (ss->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = inline_proto_outb_pkt_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare.async = lksd_proto_prepare;
		pf->process = pkt_flag_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		rc = cpu_crypto_pkt_func_select(sa, pf);
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}