1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
9 #include <rte_cryptodev.h>
12 #include "ipsec_sqn.h"
17 /* some helper structures */
19 struct rte_crypto_auth_xform *auth;
20 struct rte_crypto_cipher_xform *cipher;
21 struct rte_crypto_aead_xform *aead;
25 * helper routine, fills internal crypto_xform structure.
28 fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
29 const struct rte_ipsec_sa_prm *prm)
31 struct rte_crypto_sym_xform *xf, *xfn;
33 memset(xform, 0, sizeof(*xform));
35 xf = prm->crypto_xform;
41 /* for AEAD just one xform is required */
42 if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
45 xform->aead = &xf->aead;
47 * CIPHER+AUTH xforms are expected in strict order,
48 * depending on SA direction:
49 * inbound: AUTH+CIPHER
50 * outbound: CIPHER+AUTH
52 } else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
54 /* wrong order or no cipher */
55 if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
56 xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
59 xform->auth = &xf->auth;
60 xform->cipher = &xfn->cipher;
64 /* wrong order or no auth */
65 if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
66 xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
69 xform->cipher = &xf->cipher;
70 xform->auth = &xfn->auth;
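/*
 * Illustrative sketch (not part of the library): how a caller might
 * chain the xforms in prm->crypto_xform for an outbound CIPHER+AUTH SA.
 * The algorithm choices below are hypothetical; key material and the
 * remaining fields are omitted.
 *
 *	struct rte_crypto_sym_xform cxf, axf;
 *
 *	memset(&cxf, 0, sizeof(cxf));
 *	cxf.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cxf.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
 *	cxf.next = &axf;
 *
 *	memset(&axf, 0, sizeof(axf));
 *	axf.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	axf.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
 *	axf.next = NULL;
 *
 *	prm.crypto_xform = &cxf;
 *
 * For an inbound SA the order is reversed: the AUTH xform comes first,
 * with its next pointer referring to the CIPHER xform.
 */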
76 uint64_t __rte_experimental
77 rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
83 ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
90 if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
93 * RFC 4303 recommends 64 as minimum window size.
94 * there is no point in using ESN mode without an SQN window,
95 * so make sure the window is at least 64 when ESN is enabled.
97 wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
98 RTE_IPSEC_SATP_ESN_DISABLE) ?
99 wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
101 n = replay_num_bucket(wsz);
104 if (n > WINDOW_BUCKET_MAX)
111 if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
112 sz *= REPLAY_SQN_NUM;
114 sz += sizeof(struct rte_ipsec_sa);
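	/*
	 * Worked example (a sketch, assuming WINDOW_BUCKET_SIZE == 64):
	 * an inbound SA with ESN enabled and replay_win_sz == 0 gets its
	 * window bumped to 64, n = replay_num_bucket(64) buckets are
	 * reserved, and RTE_IPSEC_SAFLAG_SQN_ATOM doubles the replay
	 * window storage (REPLAY_SQN_NUM copies).
	 */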
118 void __rte_experimental
119 rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
121 memset(sa, 0, sa->size);
125 * Determine expected SA type based on input parameters.
128 fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
134 if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
135 tp |= RTE_IPSEC_SATP_PROTO_AH;
136 else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
137 tp |= RTE_IPSEC_SATP_PROTO_ESP;
141 if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
142 tp |= RTE_IPSEC_SATP_DIR_OB;
143 else if (prm->ipsec_xform.direction ==
144 RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
145 tp |= RTE_IPSEC_SATP_DIR_IB;
149 if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
150 if (prm->ipsec_xform.tunnel.type ==
151 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
152 tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
153 else if (prm->ipsec_xform.tunnel.type ==
154 RTE_SECURITY_IPSEC_TUNNEL_IPV6)
155 tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
159 if (prm->tun.next_proto == IPPROTO_IPIP)
160 tp |= RTE_IPSEC_SATP_IPV4;
161 else if (prm->tun.next_proto == IPPROTO_IPV6)
162 tp |= RTE_IPSEC_SATP_IPV6;
165 } else if (prm->ipsec_xform.mode ==
166 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
167 tp |= RTE_IPSEC_SATP_MODE_TRANS;
168 if (prm->trs.proto == IPPROTO_IPIP)
169 tp |= RTE_IPSEC_SATP_IPV4;
170 else if (prm->trs.proto == IPPROTO_IPV6)
171 tp |= RTE_IPSEC_SATP_IPV6;
177 /* check for ESN flag */
178 if (prm->ipsec_xform.options.esn == 0)
179 tp |= RTE_IPSEC_SATP_ESN_DISABLE;
181 tp |= RTE_IPSEC_SATP_ESN_ENABLE;
183 /* interpret flags */
184 if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
185 tp |= RTE_IPSEC_SATP_SQN_ATOM;
187 tp |= RTE_IPSEC_SATP_SQN_RAW;
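	/*
	 * Example (a sketch): an inbound ESP SA in IPv4-over-IPv4 tunnel
	 * mode with ESN disabled and no extra flags ends up with:
	 * tp == RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_IB |
	 *	RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
	 *	RTE_IPSEC_SATP_ESN_DISABLE | RTE_IPSEC_SATP_SQN_RAW
	 */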
194 * Init ESP inbound specific things.
197 esp_inb_init(struct rte_ipsec_sa *sa)
199 /* these params may differ as support for new algorithms is added */
200 sa->ctp.auth.offset = 0;
201 sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
202 sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
203 sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
207 * Init ESP inbound tunnel specific things.
210 esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
212 sa->proto = prm->tun.next_proto;
217 * Init ESP outbound specific things.
220 esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
224 sa->sqn.outb.raw = 1;
226 /* these params may differ as support for new algorithms is added */
227 sa->ctp.auth.offset = hlen;
228 sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
230 algo_type = sa->algo_type;
233 case ALGO_TYPE_AES_GCM:
234 case ALGO_TYPE_AES_CTR:
236 sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
238 sa->ctp.cipher.length = 0;
240 case ALGO_TYPE_AES_CBC:
241 sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
242 sa->ctp.cipher.length = sa->iv_len;
248 * Init ESP outbound tunnel specific things.
251 esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
253 sa->proto = prm->tun.next_proto;
254 sa->hdr_len = prm->tun.hdr_len;
255 sa->hdr_l3_off = prm->tun.hdr_l3_off;
256 memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
258 esp_outb_init(sa, sa->hdr_len);
262 * helper function, init SA structure.
265 esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
266 const struct crypto_xform *cxf)
268 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
269 RTE_IPSEC_SATP_MODE_MASK;
271 if (cxf->aead != NULL) {
272 switch (cxf->aead->algo) {
273 case RTE_CRYPTO_AEAD_AES_GCM:
275 sa->aad_len = sizeof(struct aead_gcm_aad);
276 sa->icv_len = cxf->aead->digest_length;
277 sa->iv_ofs = cxf->aead->iv.offset;
278 sa->iv_len = sizeof(uint64_t);
279 sa->pad_align = IPSEC_PAD_AES_GCM;
280 sa->algo_type = ALGO_TYPE_AES_GCM;
286 sa->icv_len = cxf->auth->digest_length;
287 sa->iv_ofs = cxf->cipher->iv.offset;
288 sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
290 switch (cxf->cipher->algo) {
291 case RTE_CRYPTO_CIPHER_NULL:
292 sa->pad_align = IPSEC_PAD_NULL;
294 sa->algo_type = ALGO_TYPE_NULL;
297 case RTE_CRYPTO_CIPHER_AES_CBC:
298 sa->pad_align = IPSEC_PAD_AES_CBC;
299 sa->iv_len = IPSEC_MAX_IV_SIZE;
300 sa->algo_type = ALGO_TYPE_AES_CBC;
303 case RTE_CRYPTO_CIPHER_AES_CTR:
305 sa->pad_align = IPSEC_PAD_AES_CTR;
306 sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
307 sa->algo_type = ALGO_TYPE_AES_CTR;
315 sa->udata = prm->userdata;
316 sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
317 sa->salt = prm->ipsec_xform.salt;
319 switch (sa->type & msk) {
320 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
321 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
322 esp_inb_tun_init(sa, prm);
324 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
327 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
328 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
329 esp_outb_tun_init(sa, prm);
331 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
332 esp_outb_init(sa, 0);
340 * helper function, init SA replay structure.
343 fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
345 sa->replay.win_sz = wnd_sz;
346 sa->replay.nb_bucket = nb_bucket;
347 sa->replay.bucket_index_mask = nb_bucket - 1;
348 sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
349 if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
350 sa->sqn.inb.rsn[1] = (struct replay_sqn *)
351 ((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
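/*
 * Resulting inbound SA memory layout (a sketch based on the code above):
 *
 *	| struct rte_ipsec_sa | replay_sqn rsn[0] | replay_sqn rsn[1] |
 *
 * rsn[1] is present only for RTE_IPSEC_SATP_SQN_ATOM, providing a second
 * copy of the replay window so readers and the updater can work on
 * different copies (see rsn_acquire()/rsn_update_start() below).
 */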
354 int __rte_experimental
355 rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
364 /* determine SA type */
365 rc = fill_sa_type(prm, &type);
369 /* determine required size */
370 wsz = prm->replay_win_sz;
371 return ipsec_sa_size(type, &wsz, &nb);
374 int __rte_experimental
375 rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
381 struct crypto_xform cxf;
383 if (sa == NULL || prm == NULL)
386 /* determine SA type */
387 rc = fill_sa_type(prm, &type);
391 /* determine required size */
392 wsz = prm->replay_win_sz;
393 sz = ipsec_sa_size(type, &wsz, &nb);
396 else if (size < (uint32_t)sz)
399 /* only esp is supported right now */
400 if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
403 if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
404 prm->tun.hdr_len > sizeof(sa->hdr))
407 rc = fill_crypto_xform(&cxf, type, prm);
417 /* check for ESN flag */
418 sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
419 UINT32_MAX : UINT64_MAX;
421 rc = esp_sa_init(sa, prm, &cxf);
423 rte_ipsec_sa_fini(sa);
425 /* fill replay window related fields */
427 fill_sa_replay(sa, wsz, nb);
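/*
 * Typical usage of the two entry points above (a sketch, not part of
 * the library; error handling trimmed, "prm" assumed filled in by the
 * caller):
 *
 *	int32_t sz = rte_ipsec_sa_size(&prm);
 *	struct rte_ipsec_sa *sa = rte_zmalloc(NULL, sz,
 *		RTE_CACHE_LINE_SIZE);
 *	sz = rte_ipsec_sa_init(sa, &prm, sz);
 *	...
 *	rte_ipsec_sa_fini(sa);
 *	rte_free(sa);
 */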
433 mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[],
438 for (i = 0; i != num; i++)
443 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
446 lksd_none_cop_prepare(const struct rte_ipsec_session *ss,
447 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
450 struct rte_crypto_sym_op *sop;
452 for (i = 0; i != num; i++) {
454 cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
455 cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
456 cop[i]->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
458 __rte_crypto_sym_op_attach_sym_session(sop, ss->crypto.ses);
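/*
 * A minimal sketch of how the crypto ops fed into the prepare routines
 * might be allocated (assumption: "cop_pool" is a mempool created by
 * the application with rte_crypto_op_pool_create()):
 *
 *	struct rte_crypto_op *cop[BURST_SZ];
 *	uint16_t n = rte_crypto_op_bulk_alloc(cop_pool,
 *		RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, BURST_SZ);
 *
 * BURST_SZ is a hypothetical application-defined burst size.
 */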
463 * setup crypto op and crypto sym op for ESP outbound packet.
466 esp_outb_cop_prepare(struct rte_crypto_op *cop,
467 const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
468 const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
470 struct rte_crypto_sym_op *sop;
471 struct aead_gcm_iv *gcm;
472 struct aesctr_cnt_blk *ctr;
473 uint8_t algo_type = sa->algo_type;
475 /* fill sym op fields */
479 case ALGO_TYPE_AES_GCM:
480 /* AEAD (AES_GCM) case */
481 sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
482 sop->aead.data.length = sa->ctp.cipher.length + plen;
483 sop->aead.digest.data = icv->va;
484 sop->aead.digest.phys_addr = icv->pa;
485 sop->aead.aad.data = icv->va + sa->icv_len;
486 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
488 /* fill AAD IV (located inside crypto op) */
489 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
491 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
493 case ALGO_TYPE_AES_CBC:
494 /* Cipher-Auth (AES-CBC with any auth) case */
495 sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
496 sop->cipher.data.length = sa->ctp.cipher.length + plen;
497 sop->auth.data.offset = sa->ctp.auth.offset + hlen;
498 sop->auth.data.length = sa->ctp.auth.length + plen;
499 sop->auth.digest.data = icv->va;
500 sop->auth.digest.phys_addr = icv->pa;
502 case ALGO_TYPE_AES_CTR:
503 /* Cipher-Auth (AES-CTR with any auth) case */
504 sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
505 sop->cipher.data.length = sa->ctp.cipher.length + plen;
506 sop->auth.data.offset = sa->ctp.auth.offset + hlen;
507 sop->auth.data.length = sa->ctp.auth.length + plen;
508 sop->auth.digest.data = icv->va;
509 sop->auth.digest.phys_addr = icv->pa;
511 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
513 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
517 sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
518 sop->cipher.data.length = sa->ctp.cipher.length + plen;
519 sop->auth.data.offset = sa->ctp.auth.offset + hlen;
520 sop->auth.data.length = sa->ctp.auth.length + plen;
521 sop->auth.digest.data = icv->va;
522 sop->auth.digest.phys_addr = icv->pa;
530 * setup/update packet data and metadata for ESP outbound tunnel case.
532 static inline int32_t
533 esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
534 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
535 union sym_op_data *icv)
537 uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
539 struct esp_hdr *esph;
540 struct esp_tail *espt;
544 /* calculate extra header space required */
545 hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
547 /* size of ipsec protected data */
549 plen = mb->pkt_len - mb->l2_len;
551 /* number of bytes to encrypt */
552 clen = plen + sizeof(*espt);
553 clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
555 /* pad length + esp tail */
557 tlen = pdlen + sa->icv_len;
559 /* do append and prepend */
560 ml = rte_pktmbuf_lastseg(mb);
561 if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
565 ph = rte_pktmbuf_prepend(mb, hlen - l2len);
570 pdofs = ml->data_len;
571 ml->data_len += tlen;
573 pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
575 /* update pkt l2/l3 len */
576 mb->l2_len = sa->hdr_l3_off;
577 mb->l3_len = sa->hdr_len - sa->hdr_l3_off;
579 /* copy tunnel pkt header */
580 rte_memcpy(ph, sa->hdr, sa->hdr_len);
582 /* update original and new ip header fields */
583 update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
586 /* update spi, seqn and iv */
587 esph = (struct esp_hdr *)(ph + sa->hdr_len);
588 iv = (uint64_t *)(esph + 1);
589 copy_iv(iv, ivp, sa->iv_len);
592 esph->seq = sqn_low32(sqc);
595 pdofs += pdlen + sa->sqh_len;
598 pdlen -= sizeof(*espt);
600 /* copy padding data */
601 rte_memcpy(pt, esp_pad_bytes, pdlen);
603 /* update esp trailer */
604 espt = (struct esp_tail *)(pt + pdlen);
605 espt->pad_len = pdlen;
606 espt->next_proto = sa->proto;
608 icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
609 icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
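/*
 * Resulting outbound tunnel packet layout (a sketch of what the code
 * above builds; sqn.hi and AAD, when needed, are written afterwards by
 * outb_pkt_xprepare()):
 *
 *	| tun hdr | esp hdr | iv | payload | padding | esp tail | icv |
 *	  <------ prepended -----><- original -><------ appended ----->
 */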
615 * for pure cryptodev (lookaside none), depending on SA settings
616 * we might have to write some extra data to the packet.
619 outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
620 const union sym_op_data *icv)
623 struct aead_gcm_aad *aad;
624 uint8_t algo_type = sa->algo_type;
626 /* insert SQN.hi between ESP trailer and ICV */
627 if (sa->sqh_len != 0) {
628 psqh = (uint32_t *)(icv->va - sa->sqh_len);
629 psqh[0] = sqn_hi32(sqc);
633 * fill IV and AAD fields, if any (aad fields are placed after icv),
634 * right now we support only one AEAD algorithm: AES-GCM.
636 if (algo_type == ALGO_TYPE_AES_GCM) {
637 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
638 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
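/*
 * Tail layout after the above (a sketch; sqn.hi is present only when
 * ESN is enabled, AAD only for AES-GCM):
 *
 *	... | padding | esp tail | sqn.hi | icv | aad |
 */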
643 * setup/update packets and crypto ops for ESP outbound tunnel case.
646 outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
647 struct rte_crypto_op *cop[], uint16_t num)
653 struct rte_ipsec_sa *sa;
654 union sym_op_data icv;
655 uint64_t iv[IPSEC_MAX_IV_QWORD];
656 struct rte_mbuf *dr[num];
661 sqn = esn_outb_update_sqn(sa, &n);
663 rte_errno = EOVERFLOW;
666 for (i = 0; i != n; i++) {
668 sqc = rte_cpu_to_be_64(sqn + i);
671 /* try to update the packet itself */
672 rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
674 /* success, setup crypto op */
677 outb_pkt_xprepare(sa, sqc, &icv);
678 esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
680 /* failure, put packet into the death-row */
688 lksd_none_cop_prepare(ss, mb, cop, k);
690 /* copy mbufs that were not prepared beyond the good ones */
691 if (k != n && k != 0)
692 mbuf_bulk_copy(mb + k, dr, n - k);
698 * setup/update packet data and metadata for ESP outbound transport case.
700 static inline int32_t
701 esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
702 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
703 uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
706 uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
708 struct esp_hdr *esph;
709 struct esp_tail *espt;
713 uhlen = l2len + l3len;
714 plen = mb->pkt_len - uhlen;
716 /* calculate extra header space required */
717 hlen = sa->iv_len + sizeof(*esph);
719 /* number of bytes to encrypt */
720 clen = plen + sizeof(*espt);
721 clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
723 /* pad length + esp tail */
725 tlen = pdlen + sa->icv_len;
727 /* do append and insert */
728 ml = rte_pktmbuf_lastseg(mb);
729 if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
732 /* prepend space for ESP header */
733 ph = rte_pktmbuf_prepend(mb, hlen);
738 pdofs = ml->data_len;
739 ml->data_len += tlen;
741 pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
743 /* shift L2/L3 headers */
744 insert_esph(ph, ph + hlen, uhlen);
746 /* update ip header fields */
747 np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
750 /* update spi, seqn and iv */
751 esph = (struct esp_hdr *)(ph + uhlen);
752 iv = (uint64_t *)(esph + 1);
753 copy_iv(iv, ivp, sa->iv_len);
756 esph->seq = sqn_low32(sqc);
759 pdofs += pdlen + sa->sqh_len;
762 pdlen -= sizeof(*espt);
764 /* copy padding data */
765 rte_memcpy(pt, esp_pad_bytes, pdlen);
767 /* update esp trailer */
768 espt = (struct esp_tail *)(pt + pdlen);
769 espt->pad_len = pdlen;
770 espt->next_proto = np;
772 icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
773 icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
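/*
 * Resulting outbound transport packet layout (a sketch of what the
 * code above builds; the original L2/L3 headers are moved up into the
 * prepended space by insert_esph() to make room for the ESP header
 * and IV):
 *
 *	| l2 | l3 | esp hdr | iv | payload | padding | esp tail | icv |
 */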
779 * setup/update packets and crypto ops for ESP outbound transport case.
782 outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
783 struct rte_crypto_op *cop[], uint16_t num)
786 uint32_t i, k, n, l2, l3;
789 struct rte_ipsec_sa *sa;
790 union sym_op_data icv;
791 uint64_t iv[IPSEC_MAX_IV_QWORD];
792 struct rte_mbuf *dr[num];
797 sqn = esn_outb_update_sqn(sa, &n);
799 rte_errno = EOVERFLOW;
802 for (i = 0; i != n; i++) {
807 sqc = rte_cpu_to_be_64(sqn + i);
810 /* try to update the packet itself */
811 rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
814 /* success, setup crypto op */
817 outb_pkt_xprepare(sa, sqc, &icv);
818 esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
820 /* failure, put packet into the death-row */
828 lksd_none_cop_prepare(ss, mb, cop, k);
830 /* copy mbufs that were not prepared beyond the good ones */
831 if (k != n && k != 0)
832 mbuf_bulk_copy(mb + k, dr, n - k);
838 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
840 static inline int32_t
841 esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
842 const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
843 const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
845 struct rte_crypto_sym_op *sop;
846 struct aead_gcm_iv *gcm;
847 struct aesctr_cnt_blk *ctr;
850 uint8_t algo_type = sa->algo_type;
852 clen = plen - sa->ctp.cipher.length;
853 if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
856 /* fill sym op fields */
860 case ALGO_TYPE_AES_GCM:
861 sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
862 sop->aead.data.length = clen;
863 sop->aead.digest.data = icv->va;
864 sop->aead.digest.phys_addr = icv->pa;
865 sop->aead.aad.data = icv->va + sa->icv_len;
866 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
868 /* fill AAD IV (located inside crypto op) */
869 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
871 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
872 pofs + sizeof(struct esp_hdr));
873 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
875 case ALGO_TYPE_AES_CBC:
876 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
877 sop->cipher.data.length = clen;
878 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
879 sop->auth.data.length = plen - sa->ctp.auth.length;
880 sop->auth.digest.data = icv->va;
881 sop->auth.digest.phys_addr = icv->pa;
883 /* copy iv from the input packet to the cop */
884 ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
885 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
886 pofs + sizeof(struct esp_hdr));
887 copy_iv(ivc, ivp, sa->iv_len);
889 case ALGO_TYPE_AES_CTR:
890 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
891 sop->cipher.data.length = clen;
892 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
893 sop->auth.data.length = plen - sa->ctp.auth.length;
894 sop->auth.digest.data = icv->va;
895 sop->auth.digest.phys_addr = icv->pa;
897 /* copy iv from the input packet to the cop */
898 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
900 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
901 pofs + sizeof(struct esp_hdr));
902 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
905 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
906 sop->cipher.data.length = clen;
907 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
908 sop->auth.data.length = plen - sa->ctp.auth.length;
909 sop->auth.digest.data = icv->va;
910 sop->auth.digest.phys_addr = icv->pa;
921 * for pure cryptodev (lookaside none), depending on SA settings
922 * we might have to write some extra data to the packet.
925 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
926 const union sym_op_data *icv)
928 struct aead_gcm_aad *aad;
930 /* insert SQN.hi between ESP trailer and ICV */
931 if (sa->sqh_len != 0)
932 insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
935 * fill AAD fields, if any (aad fields are placed after icv),
936 * right now we support only one AEAD algorithm: AES-GCM.
938 if (sa->aad_len != 0) {
939 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
940 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
945 * setup/update packet data and metadata for ESP inbound tunnel case.
947 static inline int32_t
948 esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
949 const struct replay_sqn *rsn, struct rte_mbuf *mb,
950 uint32_t hlen, union sym_op_data *icv)
954 uint32_t icv_ofs, plen;
956 struct esp_hdr *esph;
958 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
961 * retrieve and reconstruct the SQN, check it against the replay
962 * window, then convert it back into network byte order.
964 sqn = rte_be_to_cpu_32(esph->seq);
966 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
968 rc = esn_inb_check_sqn(rsn, sa, sqn);
972 sqn = rte_cpu_to_be_64(sqn);
974 /* start packet manipulation */
978 ml = rte_pktmbuf_lastseg(mb);
979 icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
981 /* we have to allocate space for AAD somewhere;
982 * for now just use the free trailing space in the last segment.
983 * It would probably be more convenient to reserve space for AAD
984 * inside rte_crypto_op itself
985 * (space for IV is already reserved inside the cop).
987 if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
990 icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
991 icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
993 inb_pkt_xprepare(sa, sqn, icv);
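/*
 * ESN reconstruction example (a sketch, following RFC 4303 Appendix A):
 * with the window top at 2^32 + 10 (i.e. SQN.hi == 1) and an arriving
 * packet carrying 5 in its 32-bit seq field, reconstruct_esn() is
 * expected to yield 2^32 + 5 (the candidate closest to the window top)
 * rather than plain 5.
 */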
998 * setup/update packets and crypto ops for ESP inbound case.
1001 inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1002 struct rte_crypto_op *cop[], uint16_t num)
1006 struct rte_ipsec_sa *sa;
1007 struct replay_sqn *rsn;
1008 union sym_op_data icv;
1009 struct rte_mbuf *dr[num];
1012 rsn = rsn_acquire(sa);
1015 for (i = 0; i != num; i++) {
1017 hl = mb[i]->l2_len + mb[i]->l3_len;
1018 rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
1020 rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,
1031 rsn_release(sa, rsn);
1034 lksd_none_cop_prepare(ss, mb, cop, k);
1036 /* copy mbufs that were not prepared beyond the good ones */
1037 if (k != num && k != 0)
1038 mbuf_bulk_copy(mb + k, dr, num - k);
1044 * setup crypto ops for LOOKASIDE_PROTO type of devices.
1047 lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
1048 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
1051 struct rte_crypto_sym_op *sop;
1053 for (i = 0; i != num; i++) {
1055 cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1056 cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1057 cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
1059 __rte_security_attach_session(sop, ss->security.ses);
1064 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
1065 * Note that for LOOKASIDE_PROTO all packet modifications will be
1066 * performed by PMD/HW.
1067 * SW only has to prepare the crypto ops.
1070 lksd_proto_prepare(const struct rte_ipsec_session *ss,
1071 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
1073 lksd_proto_cop_prepare(ss, mb, cop, num);
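/*
 * A minimal sketch of the lookaside data path served by the prepare
 * routines above (assumptions: "dev_id" and "qp" identify a configured
 * cryptodev queue pair; mbufs are recovered from the completed cops
 * before the process call):
 *
 *	uint16_t k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
 *	... later ...
 *	k = rte_cryptodev_dequeue_burst(dev_id, qp, cop, RTE_DIM(cop));
 *	k = rte_ipsec_pkt_process(ss, mb, k);
 */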
1078 * process ESP inbound tunnel packet.
1081 esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
1084 uint32_t hlen, icv_len, tlen;
1085 struct esp_hdr *esph;
1086 struct esp_tail *espt;
1087 struct rte_mbuf *ml;
1090 if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
1093 icv_len = sa->icv_len;
1095 ml = rte_pktmbuf_lastseg(mb);
1096 espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
1097 ml->data_len - icv_len - sizeof(*espt));
1100 * check padding and next proto;
1101 * return an error if something is wrong.
1103 pd = (char *)espt - espt->pad_len;
1104 if (espt->next_proto != sa->proto ||
1105 memcmp(pd, esp_pad_bytes, espt->pad_len))
1108 /* cut off ICV, ESP tail and padding bytes */
1109 tlen = icv_len + sizeof(*espt) + espt->pad_len;
1110 ml->data_len -= tlen;
1111 mb->pkt_len -= tlen;
1113 /* cut off L2/L3 headers, ESP header and IV */
1114 hlen = mb->l2_len + mb->l3_len;
1115 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
1116 rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
1118 /* retrieve SQN for later check */
1119 *sqn = rte_be_to_cpu_32(esph->seq);
1121 /* reset mbuf metadata: L2/L3 len, packet type */
1122 mb->packet_type = RTE_PTYPE_UNKNOWN;
1126 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
1127 mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
1132 * process ESP inbound transport packet.
1135 esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
1138 uint32_t hlen, icv_len, l2len, l3len, tlen;
1139 struct esp_hdr *esph;
1140 struct esp_tail *espt;
1141 struct rte_mbuf *ml;
1144 if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
1147 icv_len = sa->icv_len;
1149 ml = rte_pktmbuf_lastseg(mb);
1150 espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
1151 ml->data_len - icv_len - sizeof(*espt));
1153 /* check padding, return an error if something is wrong. */
1154 pd = (char *)espt - espt->pad_len;
1155 if (memcmp(pd, esp_pad_bytes, espt->pad_len))
1158 /* cut off ICV, ESP tail and padding bytes */
1159 tlen = icv_len + sizeof(*espt) + espt->pad_len;
1160 ml->data_len -= tlen;
1161 mb->pkt_len -= tlen;
1163 /* retrieve SQN for later check */
1166 hlen = l2len + l3len;
1167 op = rte_pktmbuf_mtod(mb, char *);
1168 esph = (struct esp_hdr *)(op + hlen);
1169 *sqn = rte_be_to_cpu_32(esph->seq);
1171 /* cut off ESP header and IV, update L3 header */
1172 np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
1173 remove_esph(np, op, hlen);
1174 update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
1177 /* reset mbuf packet type */
1178 mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
1180 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
1181 mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
1186 * for group of ESP inbound packets perform SQN check and update.
1188 static inline uint16_t
1189 esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
1190 struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num)
1193 struct replay_sqn *rsn;
1195 rsn = rsn_update_start(sa);
1198 for (i = 0; i != num; i++) {
1199 if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
1205 rsn_update_finish(sa, rsn);
1210 * process group of ESP inbound tunnel packets.
1213 inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1217 struct rte_ipsec_sa *sa;
1219 struct rte_mbuf *dr[num];
1223 /* process packets, extract seq numbers */
1226 for (i = 0; i != num; i++) {
1228 if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
1230 /* bad packet, will be dropped from further processing */
1235 /* update seq # and replay window */
1236 k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);
1238 /* handle unprocessed mbufs */
1240 rte_errno = EBADMSG;
1242 mbuf_bulk_copy(mb + k, dr, num - k);
1249 * process group of ESP inbound transport packets.
1252 inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1257 struct rte_ipsec_sa *sa;
1258 struct rte_mbuf *dr[num];
1262 /* process packets, extract seq numbers */
1265 for (i = 0; i != num; i++) {
1267 if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
1269 /* bad packet, will be dropped from further processing */
1274 /* update seq # and replay window */
1275 k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);
1277 /* handle unprocessed mbufs */
1279 rte_errno = EBADMSG;
1281 mbuf_bulk_copy(mb + k, dr, num - k);
1288 * process outbound packets for SA with ESN support,
1289 * for algorithms that require SQN.hibits to be implicitly included
1290 * in the digest computation.
1291 * In that case we have to move ICV bytes back to their proper place.
1294 outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1297 uint32_t i, k, icv_len, *icv;
1298 struct rte_mbuf *ml;
1299 struct rte_ipsec_sa *sa;
1300 struct rte_mbuf *dr[num];
1305 icv_len = sa->icv_len;
1307 for (i = 0; i != num; i++) {
1308 if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
1309 ml = rte_pktmbuf_lastseg(mb[i]);
1310 icv = rte_pktmbuf_mtod_offset(ml, void *,
1311 ml->data_len - icv_len);
1312 remove_sqh(icv, icv_len);
1318 /* handle unprocessed mbufs */
1320 rte_errno = EBADMSG;
1322 mbuf_bulk_copy(mb + k, dr, num - k);
1329 * simplest pkt process routine:
1330 * all actual processing is already done by HW/PMD,
1331 * just check mbuf ol_flags.
1333 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
1334 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
1335 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
1338 pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1342 struct rte_mbuf *dr[num];
1347 for (i = 0; i != num; i++) {
1348 if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
1354 /* handle unprocessed mbufs */
1356 rte_errno = EBADMSG;
1358 mbuf_bulk_copy(mb + k, dr, num - k);
1365 * prepare packets for inline ipsec processing:
1366 * set ol_flags and attach metadata.
1369 inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
1370 struct rte_mbuf *mb[], uint16_t num)
1372 uint32_t i, ol_flags;
1374 ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
1375 for (i = 0; i != num; i++) {
1377 mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
1379 rte_security_set_pkt_metadata(ss->security.ctx,
1380 ss->security.ses, mb[i], NULL);
1385 * process group of ESP outbound tunnel packets destined for
1386 * INLINE_CRYPTO type of device.
1389 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
1390 struct rte_mbuf *mb[], uint16_t num)
1396 struct rte_ipsec_sa *sa;
1397 union sym_op_data icv;
1398 uint64_t iv[IPSEC_MAX_IV_QWORD];
1399 struct rte_mbuf *dr[num];
1404 sqn = esn_outb_update_sqn(sa, &n);
1406 rte_errno = EOVERFLOW;
1409 for (i = 0; i != n; i++) {
1411 sqc = rte_cpu_to_be_64(sqn + i);
1414 /* try to update the packet itself */
1415 rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
1417 /* success, update mbuf fields */
1420 /* failure, put packet into the death-row */
1427 inline_outb_mbuf_prepare(ss, mb, k);
1429 /* copy mbufs that were not processed beyond the good ones */
1430 if (k != n && k != 0)
1431 mbuf_bulk_copy(mb + k, dr, n - k);
1437 * process group of ESP outbound transport packets destined for
1438 * INLINE_CRYPTO type of device.
1441 inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
1442 struct rte_mbuf *mb[], uint16_t num)
1445 uint32_t i, k, n, l2, l3;
1448 struct rte_ipsec_sa *sa;
1449 union sym_op_data icv;
1450 uint64_t iv[IPSEC_MAX_IV_QWORD];
1451 struct rte_mbuf *dr[num];
1456 sqn = esn_outb_update_sqn(sa, &n);
1458 rte_errno = EOVERFLOW;
1461 for (i = 0; i != n; i++) {
1466 sqc = rte_cpu_to_be_64(sqn + i);
1469 /* try to update the packet itself */
1470 rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
1473 /* success, update mbuf fields */
1476 /* failure, put packet into the death-row */
1483 inline_outb_mbuf_prepare(ss, mb, k);
1485 /* copy mbufs that were not processed beyond the good ones */
1486 if (k != n && k != 0)
1487 mbuf_bulk_copy(mb + k, dr, n - k);
1493 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1494 * actual processing is done by HW/PMD, just set flags and metadata.
1497 outb_inline_proto_process(const struct rte_ipsec_session *ss,
1498 struct rte_mbuf *mb[], uint16_t num)
1500 inline_outb_mbuf_prepare(ss, mb, num);
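/*
 * A minimal sketch of the inline outbound path (assumptions: "port"
 * and "txq" identify the inline-crypto/protocol capable ethdev the
 * session is bound to):
 *
 *	uint16_t k = rte_ipsec_pkt_process(ss, mb, n);
 *	k = rte_eth_tx_burst(port, txq, mb, k);
 *
 * rte_ipsec_pkt_process() dispatches to one of the process routines
 * selected below; no crypto ops are involved for inline sessions.
 */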
1505 * Select packet processing function for session on LOOKASIDE_NONE type of device.
1509 lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
1510 struct rte_ipsec_sa_pkt_func *pf)
1514 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
1515 RTE_IPSEC_SATP_MODE_MASK;
1518 switch (sa->type & msk) {
1519 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
1520 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
1521 pf->prepare = inb_pkt_prepare;
1522 pf->process = inb_tun_pkt_process;
1524 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
1525 pf->prepare = inb_pkt_prepare;
1526 pf->process = inb_trs_pkt_process;
1528 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
1529 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
1530 pf->prepare = outb_tun_prepare;
1531 pf->process = (sa->sqh_len != 0) ?
1532 outb_sqh_process : pkt_flag_process;
1534 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
1535 pf->prepare = outb_trs_prepare;
1536 pf->process = (sa->sqh_len != 0) ?
1537 outb_sqh_process : pkt_flag_process;
1547 * Select packet processing function for session on INLINE_CRYPTO type of device.
1551 inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
1552 struct rte_ipsec_sa_pkt_func *pf)
1556 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
1557 RTE_IPSEC_SATP_MODE_MASK;
1560 switch (sa->type & msk) {
1561 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
1562 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
1563 pf->process = inb_tun_pkt_process;
1565 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
1566 pf->process = inb_trs_pkt_process;
1568 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
1569 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
1570 pf->process = inline_outb_tun_pkt_process;
1572 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
1573 pf->process = inline_outb_trs_pkt_process;
1583 * Select packet processing function for given session based on SA parameters
1584 * and the type of device associated with the session.
1587 ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
1588 const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
1593 pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
1596 case RTE_SECURITY_ACTION_TYPE_NONE:
1597 rc = lksd_none_pkt_func_select(sa, pf);
1599 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1600 rc = inline_crypto_pkt_func_select(sa, pf);
1602 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1603 if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
1604 RTE_IPSEC_SATP_DIR_IB)
1605 pf->process = pkt_flag_process;
1607 pf->process = outb_inline_proto_process;
1609 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
1610 pf->prepare = lksd_proto_prepare;
1611 pf->process = pkt_flag_process;