1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
9 #include <rte_cryptodev.h>
12 #include "ipsec_sqn.h"
17 /* some helper structures */
19 struct rte_crypto_auth_xform *auth;
20 struct rte_crypto_cipher_xform *cipher;
21 struct rte_crypto_aead_xform *aead;
25 * helper routine, fills internal crypto_xform structure.
28 fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
29 const struct rte_ipsec_sa_prm *prm)
31 struct rte_crypto_sym_xform *xf, *xfn;
33 memset(xform, 0, sizeof(*xform));
35 xf = prm->crypto_xform;
41 /* for AEAD just one xform is required */
42 if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
45 xform->aead = &xf->aead;
47 * CIPHER+AUTH xforms are expected in strict order,
48 * depending on SA direction:
49 * inbound: AUTH+CIPHER
50 * outbound: CIPHER+AUTH
52 } else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
54 /* wrong order or no cipher */
55 if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
56 xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
59 xform->auth = &xf->auth;
60 xform->cipher = &xfn->cipher;
64 /* wrong order or no auth */
65 if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
66 xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
69 xform->cipher = &xf->cipher;
70 xform->auth = &xfn->auth;
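/*
 * Illustration only (not part of the library): a minimal sketch of how a
 * caller might chain the crypto xforms for an outbound CIPHER+AUTH SA so
 * that fill_crypto_xform() above accepts them. cipher_key, auth_key and
 * IV_OFFSET are hypothetical; algorithms, key and digest sizes below are
 * placeholders and should match the capabilities of the actual device.
 */
#if 0	/* usage sketch */
	struct rte_crypto_sym_xform cipher_xf, auth_xf;
	struct rte_ipsec_sa_prm prm;

	memset(&cipher_xf, 0, sizeof(cipher_xf));
	cipher_xf.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xf.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xf.cipher.key.data = cipher_key;	/* hypothetical key buffer */
	cipher_xf.cipher.key.length = 16;
	cipher_xf.cipher.iv.offset = IV_OFFSET;	/* hypothetical offset in the cop */
	cipher_xf.cipher.iv.length = 16;

	memset(&auth_xf, 0, sizeof(auth_xf));
	auth_xf.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xf.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xf.auth.key.data = auth_key;	/* hypothetical key buffer */
	auth_xf.auth.key.length = 20;
	auth_xf.auth.digest_length = 12;

	/* outbound expects CIPHER first, then AUTH */
	cipher_xf.next = &auth_xf;
	auth_xf.next = NULL;
	prm.crypto_xform = &cipher_xf;
#endif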
76 uint64_t __rte_experimental
77 rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
83 ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
88 if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
89 RTE_IPSEC_SATP_DIR_IB)
90 n = replay_num_bucket(wsz);
92 if (n > WINDOW_BUCKET_MAX)
98 sz += sizeof(struct rte_ipsec_sa);
102 void __rte_experimental
103 rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
105 memset(sa, 0, sa->size);
109 * Determine expected SA type based on input parameters.
112 fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
118 if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
119 tp |= RTE_IPSEC_SATP_PROTO_AH;
120 else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
121 tp |= RTE_IPSEC_SATP_PROTO_ESP;
125 if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
126 tp |= RTE_IPSEC_SATP_DIR_OB;
127 else if (prm->ipsec_xform.direction ==
128 RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
129 tp |= RTE_IPSEC_SATP_DIR_IB;
133 if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
134 if (prm->ipsec_xform.tunnel.type ==
135 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
136 tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
137 else if (prm->ipsec_xform.tunnel.type ==
138 RTE_SECURITY_IPSEC_TUNNEL_IPV6)
139 tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
143 if (prm->tun.next_proto == IPPROTO_IPIP)
144 tp |= RTE_IPSEC_SATP_IPV4;
145 else if (prm->tun.next_proto == IPPROTO_IPV6)
146 tp |= RTE_IPSEC_SATP_IPV6;
149 } else if (prm->ipsec_xform.mode ==
150 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
151 tp |= RTE_IPSEC_SATP_MODE_TRANS;
152 if (prm->trs.proto == IPPROTO_IPIP)
153 tp |= RTE_IPSEC_SATP_IPV4;
154 else if (prm->trs.proto == IPPROTO_IPV6)
155 tp |= RTE_IPSEC_SATP_IPV6;
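/*
 * Illustration only: the rte_security_ipsec_xform settings that the code
 * above would map to an inbound ESP tunnel SA over IPv4 carrying IPv4
 * traffic. Values are placeholders, not a complete configuration.
 */
#if 0	/* usage sketch */
	struct rte_ipsec_sa_prm prm;

	memset(&prm, 0, sizeof(prm));
	prm.ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	prm.ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm.ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	prm.tun.next_proto = IPPROTO_IPIP;	/* inner packets are IPv4 */
	/*
	 * resulting type: SATP_PROTO_ESP | SATP_DIR_IB |
	 * SATP_MODE_TUNLV4 | SATP_IPV4
	 */
#endif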
166 * Init ESP inbound specific things.
169 esp_inb_init(struct rte_ipsec_sa *sa)
171 /* these params may differ when new algorithms are added */
172 sa->ctp.auth.offset = 0;
173 sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
174 sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
175 sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
179 * Init ESP inbound tunnel specific things.
182 esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
184 sa->proto = prm->tun.next_proto;
189 * Init ESP outbound specific things.
192 esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
196 /* these params may differ when new algorithms are added */
197 sa->ctp.auth.offset = hlen;
198 sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
199 if (sa->aad_len != 0) {
200 sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
202 sa->ctp.cipher.length = 0;
204 sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
205 sa->ctp.cipher.length = sa->iv_len;
210 * Init ESP outbound tunnel specific things.
213 esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
215 sa->proto = prm->tun.next_proto;
216 sa->hdr_len = prm->tun.hdr_len;
217 sa->hdr_l3_off = prm->tun.hdr_l3_off;
218 memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
220 esp_outb_init(sa, sa->hdr_len);
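/*
 * Illustration only: a minimal sketch of the outer header template that
 * prm->tun above is expected to describe for an IPv4 tunnel. Struct and
 * field names follow rte_ip.h as of this writing; addresses and TTL are
 * placeholders. Total length and checksum of the template are left zero,
 * as they are updated per packet by update_tun_l3hdr().
 */
#if 0	/* usage sketch */
	struct ipv4_hdr outh;
	struct rte_ipsec_sa_prm prm;

	memset(&outh, 0, sizeof(outh));
	outh.version_ihl = 0x45;			/* IPv4, 20-byte header */
	outh.time_to_live = 64;
	outh.next_proto_id = IPPROTO_ESP;
	outh.src_addr = rte_cpu_to_be_32(0xc0000201);	/* 192.0.2.1, placeholder */
	outh.dst_addr = rte_cpu_to_be_32(0xc6336401);	/* 198.51.100.1, placeholder */

	prm.tun.hdr = &outh;
	prm.tun.hdr_len = sizeof(outh);
	prm.tun.hdr_l3_off = 0;		/* template starts at the IP header */
	prm.tun.next_proto = IPPROTO_IPIP;
#endif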
224 * helper function, init SA structure.
227 esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
228 const struct crypto_xform *cxf)
230 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
231 RTE_IPSEC_SATP_MODE_MASK;
233 if (cxf->aead != NULL) {
235 if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
237 sa->aad_len = sizeof(struct aead_gcm_aad);
238 sa->icv_len = cxf->aead->digest_length;
239 sa->iv_ofs = cxf->aead->iv.offset;
240 sa->iv_len = sizeof(uint64_t);
241 sa->pad_align = IPSEC_PAD_AES_GCM;
243 sa->icv_len = cxf->auth->digest_length;
244 sa->iv_ofs = cxf->cipher->iv.offset;
245 sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
246 if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
247 sa->pad_align = IPSEC_PAD_NULL;
249 } else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
250 sa->pad_align = IPSEC_PAD_AES_CBC;
251 sa->iv_len = IPSEC_MAX_IV_SIZE;
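/*
 * Note: the IV lengths chosen here follow the ESP algorithm RFCs:
 * RFC 4106 specifies an 8-byte explicit IV for AES-GCM (the AEAD branch
 * above), while RFC 3602 uses a full-block 16-byte IV for AES-CBC.
 */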
256 sa->udata = prm->userdata;
257 sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
258 sa->salt = prm->ipsec_xform.salt;
260 switch (sa->type & msk) {
261 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
262 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
263 esp_inb_tun_init(sa, prm);
265 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
268 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
269 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
270 esp_outb_tun_init(sa, prm);
272 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
273 esp_outb_init(sa, 0);
280 int __rte_experimental
281 rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
290 /* determine SA type */
291 rc = fill_sa_type(prm, &type);
295 /* determine required size */
296 return ipsec_sa_size(prm->replay_win_sz, type, &nb);
299 int __rte_experimental
300 rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
306 struct crypto_xform cxf;
308 if (sa == NULL || prm == NULL)
311 /* determine SA type */
312 rc = fill_sa_type(prm, &type);
316 /* determine required size */
317 sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
320 else if (size < (uint32_t)sz)
323 /* only esp is supported right now */
324 if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
327 if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
328 prm->tun.hdr_len > sizeof(sa->hdr))
331 rc = fill_crypto_xform(&cxf, type, prm);
341 /* check for ESN flag */
342 sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
343 UINT32_MAX : UINT64_MAX;
345 rc = esp_sa_init(sa, prm, &cxf);
347 rte_ipsec_sa_fini(sa);
349 /* fill replay window related fields */
351 sa->replay.win_sz = prm->replay_win_sz;
352 sa->replay.nb_bucket = nb;
353 sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
354 sa->sqn.inb = (struct replay_sqn *)(sa + 1);
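/*
 * Illustration only: the allocate-and-init call sequence a user of this API
 * would typically follow. rte_zmalloc() is just one possible allocator; any
 * memory of the size reported by rte_ipsec_sa_size() will do.
 */
#if 0	/* usage sketch */
	int32_t sz;
	struct rte_ipsec_sa *sa;

	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0)
		return sz;		/* invalid parameters */

	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (sa == NULL)
		return -ENOMEM;

	sz = rte_ipsec_sa_init(sa, &prm, sz);
	if (sz < 0) {
		rte_free(sa);
		return sz;
	}
#endif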
361 mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[],
366 for (i = 0; i != num; i++)
371 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
374 lksd_none_cop_prepare(const struct rte_ipsec_session *ss,
375 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
378 struct rte_crypto_sym_op *sop;
380 for (i = 0; i != num; i++) {
382 cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
383 cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
384 cop[i]->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
386 __rte_crypto_sym_op_attach_sym_session(sop, ss->crypto.ses);
391 * setup crypto op and crypto sym op for ESP outbound packet.
394 esp_outb_cop_prepare(struct rte_crypto_op *cop,
395 const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
396 const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
398 struct rte_crypto_sym_op *sop;
399 struct aead_gcm_iv *gcm;
401 /* fill sym op fields */
404 /* AEAD (AES_GCM) case */
405 if (sa->aad_len != 0) {
406 sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
407 sop->aead.data.length = sa->ctp.cipher.length + plen;
408 sop->aead.digest.data = icv->va;
409 sop->aead.digest.phys_addr = icv->pa;
410 sop->aead.aad.data = icv->va + sa->icv_len;
411 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
413 /* fill AAD IV (located inside crypto op) */
414 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
416 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
417 /* CRYPT+AUTH case */
419 sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
420 sop->cipher.data.length = sa->ctp.cipher.length + plen;
421 sop->auth.data.offset = sa->ctp.auth.offset + hlen;
422 sop->auth.data.length = sa->ctp.auth.length + plen;
423 sop->auth.digest.data = icv->va;
424 sop->auth.digest.phys_addr = icv->pa;
429 * setup/update packet data and metadata for ESP outbound tunnel case.
431 static inline int32_t
432 esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
433 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
434 union sym_op_data *icv)
436 uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
438 struct esp_hdr *esph;
439 struct esp_tail *espt;
443 /* calculate extra header space required */
444 hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
446 /* size of ipsec protected data */
448 plen = mb->pkt_len - mb->l2_len;
450 /* number of bytes to encrypt */
451 clen = plen + sizeof(*espt);
452 clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
454 /* pad length + esp tail */
456 tlen = pdlen + sa->icv_len;
458 /* do append and prepend */
459 ml = rte_pktmbuf_lastseg(mb);
460 if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
464 ph = rte_pktmbuf_prepend(mb, hlen - l2len);
469 pdofs = ml->data_len;
470 ml->data_len += tlen;
472 pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
474 /* update pkt l2/l3 len */
475 mb->l2_len = sa->hdr_l3_off;
476 mb->l3_len = sa->hdr_len - sa->hdr_l3_off;
478 /* copy tunnel pkt header */
479 rte_memcpy(ph, sa->hdr, sa->hdr_len);
481 /* update original and new ip header fields */
482 update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
485 /* update spi, seqn and iv */
486 esph = (struct esp_hdr *)(ph + sa->hdr_len);
487 iv = (uint64_t *)(esph + 1);
488 rte_memcpy(iv, ivp, sa->iv_len);
491 esph->seq = sqn_low32(sqc);
494 pdofs += pdlen + sa->sqh_len;
497 pdlen -= sizeof(*espt);
499 /* copy padding data */
500 rte_memcpy(pt, esp_pad_bytes, pdlen);
502 /* update esp trailer */
503 espt = (struct esp_tail *)(pt + pdlen);
504 espt->pad_len = pdlen;
505 espt->next_proto = sa->proto;
507 icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
508 icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
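/*
 * For reference, after the preparation above the mbuf holds the layout
 * below (RFC 4303, tunnel mode); the ICV itself is written later by the
 * crypto operation:
 *
 *   tun hdr | ESP hdr | IV | payload | padding | pad_len | next_proto | ICV
 *   <hdr_len> <  8  > <iv_len>       <--------- ESP tail ---------> <icv_len>
 */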
514 * for pure cryptodev (lookaside none) depending on SA settings,
515 * we might have to write some extra data to the packet.
518 outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
519 const union sym_op_data *icv)
522 struct aead_gcm_aad *aad;
524 /* insert SQN.hi between ESP trailer and ICV */
525 if (sa->sqh_len != 0) {
526 psqh = (uint32_t *)(icv->va - sa->sqh_len);
527 psqh[0] = sqn_hi32(sqc);
531 * fill IV and AAD fields, if any (aad fields are placed after icv),
532 * right now we support only one AEAD algorithm: AES-GCM.
534 if (sa->aad_len != 0) {
535 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
536 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
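/*
 * Per RFC 4106 the AAD for AES-GCM ESP is the SPI followed by the 32-bit
 * SQN, or by the full 64-bit ESN when extended sequence numbers are
 * enabled; aead_gcm_aad_fill() is expected to lay it out accordingly.
 */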
541 * setup/update packets and crypto ops for ESP outbound tunnel case.
544 outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
545 struct rte_crypto_op *cop[], uint16_t num)
551 struct rte_ipsec_sa *sa;
552 union sym_op_data icv;
553 uint64_t iv[IPSEC_MAX_IV_QWORD];
554 struct rte_mbuf *dr[num];
559 sqn = esn_outb_update_sqn(sa, &n);
561 rte_errno = EOVERFLOW;
564 for (i = 0; i != n; i++) {
566 sqc = rte_cpu_to_be_64(sqn + i);
569 /* try to update the packet itself */
570 rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
572 /* success, setup crypto op */
575 outb_pkt_xprepare(sa, sqc, &icv);
576 esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
578 /* failure, put packet into the death-row */
586 lksd_none_cop_prepare(ss, mb, cop, k);
588 /* copy the mbufs that were not prepared beyond the good ones */
589 if (k != n && k != 0)
590 mbuf_bulk_copy(mb + k, dr, n - k);
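/*
 * Illustration only: how a lookaside-none (RTE_SECURITY_ACTION_TYPE_NONE)
 * datapath would typically drive the prepare routines above through the
 * public API. ss (an already prepared rte_ipsec_session), mb, cop_pool,
 * dev_id, qid and BURST_SZ are placeholders.
 */
#if 0	/* usage sketch */
	struct rte_crypto_op *cops[BURST_SZ];
	uint16_t n, k;

	n = rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		cops, BURST_SZ);
	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cops, n);
	k = rte_cryptodev_enqueue_burst(dev_id, qid, cops, k);
#endif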
596 * setup/update packet data and metadata for ESP outbound transport case.
598 static inline int32_t
599 esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
600 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
601 uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
604 uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
606 struct esp_hdr *esph;
607 struct esp_tail *espt;
611 uhlen = l2len + l3len;
612 plen = mb->pkt_len - uhlen;
614 /* calculate extra header space required */
615 hlen = sa->iv_len + sizeof(*esph);
617 /* number of bytes to encrypt */
618 clen = plen + sizeof(*espt);
619 clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
621 /* pad length + esp tail */
623 tlen = pdlen + sa->icv_len;
625 /* do append and insert */
626 ml = rte_pktmbuf_lastseg(mb);
627 if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
630 /* prepend space for ESP header */
631 ph = rte_pktmbuf_prepend(mb, hlen);
636 pdofs = ml->data_len;
637 ml->data_len += tlen;
639 pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
641 /* shift L2/L3 headers */
642 insert_esph(ph, ph + hlen, uhlen);
644 /* update ip header fields */
645 np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
648 /* update spi, seqn and iv */
649 esph = (struct esp_hdr *)(ph + uhlen);
650 iv = (uint64_t *)(esph + 1);
651 rte_memcpy(iv, ivp, sa->iv_len);
654 esph->seq = sqn_low32(sqc);
657 pdofs += pdlen + sa->sqh_len;
660 pdlen -= sizeof(*espt);
662 /* copy padding data */
663 rte_memcpy(pt, esp_pad_bytes, pdlen);
665 /* update esp trailer */
666 espt = (struct esp_tail *)(pt + pdlen);
667 espt->pad_len = pdlen;
668 espt->next_proto = np;
670 icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
671 icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
677 * setup/update packets and crypto ops for ESP outbound transport case.
680 outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
681 struct rte_crypto_op *cop[], uint16_t num)
684 uint32_t i, k, n, l2, l3;
687 struct rte_ipsec_sa *sa;
688 union sym_op_data icv;
689 uint64_t iv[IPSEC_MAX_IV_QWORD];
690 struct rte_mbuf *dr[num];
695 sqn = esn_outb_update_sqn(sa, &n);
697 rte_errno = EOVERFLOW;
700 for (i = 0; i != n; i++) {
705 sqc = rte_cpu_to_be_64(sqn + i);
708 /* try to update the packet itself */
709 rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
712 /* success, setup crypto op */
715 outb_pkt_xprepare(sa, sqc, &icv);
716 esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
718 /* failure, put packet into the death-row */
726 lksd_none_cop_prepare(ss, mb, cop, k);
728 /* copy the mbufs that were not prepared beyond the good ones */
729 if (k != n && k != 0)
730 mbuf_bulk_copy(mb + k, dr, n - k);
736 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
738 static inline int32_t
739 esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
740 const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
741 const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
743 struct rte_crypto_sym_op *sop;
744 struct aead_gcm_iv *gcm;
748 clen = plen - sa->ctp.cipher.length;
749 if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
752 /* fill sym op fields */
755 /* AEAD (AES_GCM) case */
756 if (sa->aad_len != 0) {
757 sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
758 sop->aead.data.length = clen;
759 sop->aead.digest.data = icv->va;
760 sop->aead.digest.phys_addr = icv->pa;
761 sop->aead.aad.data = icv->va + sa->icv_len;
762 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
764 /* fill AAD IV (located inside crypto op) */
765 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
767 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
768 pofs + sizeof(struct esp_hdr));
769 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
770 /* CRYPT+AUTH case */
772 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
773 sop->cipher.data.length = clen;
774 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
775 sop->auth.data.length = plen - sa->ctp.auth.length;
776 sop->auth.digest.data = icv->va;
777 sop->auth.digest.phys_addr = icv->pa;
779 /* copy iv from the input packet to the cop */
780 ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
781 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
782 pofs + sizeof(struct esp_hdr));
783 rte_memcpy(ivc, ivp, sa->iv_len);
789 * for pure cryptodev (lookaside none) depending on SA settings,
790 * we might have to write some extra data to the packet.
793 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
794 const union sym_op_data *icv)
796 struct aead_gcm_aad *aad;
798 /* insert SQN.hi between ESP trailer and ICV */
799 if (sa->sqh_len != 0)
800 insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
803 * fill AAD fields, if any (aad fields are placed after icv),
804 * right now we support only one AEAD algorithm: AES-GCM.
806 if (sa->aad_len != 0) {
807 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
808 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
813 * setup/update packet data and metadata for ESP inbound tunnel case.
815 static inline int32_t
816 esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
817 const struct replay_sqn *rsn, struct rte_mbuf *mb,
818 uint32_t hlen, union sym_op_data *icv)
822 uint32_t icv_ofs, plen;
824 struct esp_hdr *esph;
826 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
829 * retrieve and reconstruct SQN, then check it, then
830 * convert it back into network byte order.
832 sqn = rte_be_to_cpu_32(esph->seq);
834 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
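/*
 * Worked example of the expected reconstruction (RFC 4303 Appendix A2):
 * with the highest received sequence number 0x1_0000_0050 and a window of
 * 0x40, a packet carrying low32 == 0x20 falls inside the window and keeps
 * the current high half (0x1_0000_0020), while low32 == 0x10 lies below
 * the window and is assumed to belong to the next wrap (0x2_0000_0010).
 */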
836 rc = esn_inb_check_sqn(rsn, sa, sqn);
840 sqn = rte_cpu_to_be_64(sqn);
842 /* start packet manipulation */
846 ml = rte_pktmbuf_lastseg(mb);
847 icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
849 /* we have to allocate space for the AAD somewhere;
850 * right now just use the free trailing space in the last segment.
851 * It would probably be more convenient to reserve space for the AAD
852 * inside rte_crypto_op itself
853 * (space for the IV is already reserved inside the cop).
855 if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
858 icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
859 icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
861 inb_pkt_xprepare(sa, sqn, icv);
866 * setup/update packets and crypto ops for ESP inbound case.
869 inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
870 struct rte_crypto_op *cop[], uint16_t num)
874 struct rte_ipsec_sa *sa;
875 struct replay_sqn *rsn;
876 union sym_op_data icv;
877 struct rte_mbuf *dr[num];
883 for (i = 0; i != num; i++) {
885 hl = mb[i]->l2_len + mb[i]->l3_len;
886 rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
888 rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,
900 lksd_none_cop_prepare(ss, mb, cop, k);
902 /* copy the mbufs that were not prepared beyond the good ones */
903 if (k != num && k != 0)
904 mbuf_bulk_copy(mb + k, dr, num - k);
910 * setup crypto ops for LOOKASIDE_PROTO type of devices.
913 lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
914 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
917 struct rte_crypto_sym_op *sop;
919 for (i = 0; i != num; i++) {
921 cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
922 cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
923 cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
925 __rte_security_attach_session(sop, ss->security.ses);
930 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
931 * Note that for LOOKASIDE_PROTO all packet modifications will be
932 * performed by PMD/HW.
933 * SW only has to prepare the crypto op.
936 lksd_proto_prepare(const struct rte_ipsec_session *ss,
937 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
939 lksd_proto_cop_prepare(ss, mb, cop, num);
944 * process ESP inbound tunnel packet.
947 esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
950 uint32_t hlen, icv_len, tlen;
951 struct esp_hdr *esph;
952 struct esp_tail *espt;
956 if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
959 icv_len = sa->icv_len;
961 ml = rte_pktmbuf_lastseg(mb);
962 espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
963 ml->data_len - icv_len - sizeof(*espt));
966 * check padding and next proto.
967 * return an error if something is wrong.
969 pd = (char *)espt - espt->pad_len;
970 if (espt->next_proto != sa->proto ||
971 memcmp(pd, esp_pad_bytes, espt->pad_len))
974 /* cut off ICV, ESP tail and padding bytes */
975 tlen = icv_len + sizeof(*espt) + espt->pad_len;
976 ml->data_len -= tlen;
979 /* cut off L2/L3 headers, ESP header and IV */
980 hlen = mb->l2_len + mb->l3_len;
981 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
982 rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
984 /* retrieve SQN for later check */
985 *sqn = rte_be_to_cpu_32(esph->seq);
987 /* reset mbuf metadata: L2/L3 len, packet type */
988 mb->packet_type = RTE_PTYPE_UNKNOWN;
992 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
993 mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
998 * process ESP inbound transport packet.
1001 esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
1004 uint32_t hlen, icv_len, l2len, l3len, tlen;
1005 struct esp_hdr *esph;
1006 struct esp_tail *espt;
1007 struct rte_mbuf *ml;
1010 if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
1013 icv_len = sa->icv_len;
1015 ml = rte_pktmbuf_lastseg(mb);
1016 espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
1017 ml->data_len - icv_len - sizeof(*espt));
1019 /* check padding, return an error if something is wrong. */
1020 pd = (char *)espt - espt->pad_len;
1021 if (memcmp(pd, esp_pad_bytes, espt->pad_len))
1024 /* cut off ICV, ESP tail and padding bytes */
1025 tlen = icv_len + sizeof(*espt) + espt->pad_len;
1026 ml->data_len -= tlen;
1027 mb->pkt_len -= tlen;
1029 /* retrieve SQN for later check */
1032 hlen = l2len + l3len;
1033 op = rte_pktmbuf_mtod(mb, char *);
1034 esph = (struct esp_hdr *)(op + hlen);
1035 *sqn = rte_be_to_cpu_32(esph->seq);
1037 /* cut off ESP header and IV, update L3 header */
1038 np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
1039 remove_esph(np, op, hlen);
1040 update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
1043 /* reset mbuf packet type */
1044 mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
1046 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
1047 mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
1052 * for a group of ESP inbound packets, perform SQN check and update.
1054 static inline uint16_t
1055 esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
1056 struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num)
1059 struct replay_sqn *rsn;
1064 for (i = 0; i != num; i++) {
1065 if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
1075 * process group of ESP inbound tunnel packets.
1078 inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1082 struct rte_ipsec_sa *sa;
1084 struct rte_mbuf *dr[num];
1088 /* process packets, extract seq numbers */
1091 for (i = 0; i != num; i++) {
1093 if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
1095 /* bad packet, will be dropped from further processing */
1100 /* update seq # and replay window */
1101 k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);
1103 /* handle unprocessed mbufs */
1105 rte_errno = EBADMSG;
1107 mbuf_bulk_copy(mb + k, dr, num - k);
1114 * process group of ESP inbound transport packets.
1117 inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1122 struct rte_ipsec_sa *sa;
1123 struct rte_mbuf *dr[num];
1127 /* process packets, extract seq numbers */
1130 for (i = 0; i != num; i++) {
1132 if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
1134 /* bad packet, will be dropped from further processing */
1139 /* update seq # and replay window */
1140 k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);
1142 /* handle unprocessed mbufs */
1144 rte_errno = EBADMSG;
1146 mbuf_bulk_copy(mb + k, dr, num - k);
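/*
 * Illustration only: the completion side of the lookaside-none datapath,
 * which ends up in the process routines above. The grouping helper comes
 * from rte_ipsec_group.h; dev_id, qid and BURST_SZ are placeholders.
 */
#if 0	/* usage sketch */
	struct rte_crypto_op *cops[BURST_SZ];
	struct rte_mbuf *pkts[BURST_SZ];
	struct rte_ipsec_group grp[BURST_SZ];
	uint16_t n, i, ng;

	n = rte_cryptodev_dequeue_burst(dev_id, qid, cops, BURST_SZ);
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)cops,
		pkts, grp, n);
	for (i = 0; i != ng; i++)
		grp[i].cnt = rte_ipsec_pkt_process(grp[i].id.ptr,
			grp[i].m, grp[i].cnt);
#endif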
1153 * process outbound packets for SA with ESN support,
1154 * for algorithms that require SQN.hibits to be implicitly included
1155 * into digest computation.
1156 * In that case we have to move ICV bytes back to their proper place.
1159 outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1162 uint32_t i, k, icv_len, *icv;
1163 struct rte_mbuf *ml;
1164 struct rte_ipsec_sa *sa;
1165 struct rte_mbuf *dr[num];
1170 icv_len = sa->icv_len;
1172 for (i = 0; i != num; i++) {
1173 if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
1174 ml = rte_pktmbuf_lastseg(mb[i]);
1175 icv = rte_pktmbuf_mtod_offset(ml, void *,
1176 ml->data_len - icv_len);
1177 remove_sqh(icv, icv_len);
1183 /* handle unprocessed mbufs */
1185 rte_errno = EBADMSG;
1187 mbuf_bulk_copy(mb + k, dr, num - k);
1194 * simplest pkt process routine:
1195 * all actual processing is already done by HW/PMD,
1196 * just check mbuf ol_flags.
1198 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
1199 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
1200 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
1203 pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1207 struct rte_mbuf *dr[num];
1212 for (i = 0; i != num; i++) {
1213 if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
1219 /* handle unprocessed mbufs */
1221 rte_errno = EBADMSG;
1223 mbuf_bulk_copy(mb + k, dr, num - k);
1230 * prepare packets for inline ipsec processing:
1231 * set ol_flags and attach metadata.
1234 inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
1235 struct rte_mbuf *mb[], uint16_t num)
1237 uint32_t i, ol_flags;
1239 ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
1240 for (i = 0; i != num; i++) {
1242 mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
1244 rte_security_set_pkt_metadata(ss->security.ctx,
1245 ss->security.ses, mb[i], NULL);
1250 * process group of ESP outbound tunnel packets destined for
1251 * INLINE_CRYPTO type of device.
1254 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
1255 struct rte_mbuf *mb[], uint16_t num)
1261 struct rte_ipsec_sa *sa;
1262 union sym_op_data icv;
1263 uint64_t iv[IPSEC_MAX_IV_QWORD];
1264 struct rte_mbuf *dr[num];
1269 sqn = esn_outb_update_sqn(sa, &n);
1271 rte_errno = EOVERFLOW;
1274 for (i = 0; i != n; i++) {
1276 sqc = rte_cpu_to_be_64(sqn + i);
1279 /* try to update the packet itself */
1280 rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
1282 /* success, update mbuf fields */
1285 /* failure, put packet into the death-row */
1292 inline_outb_mbuf_prepare(ss, mb, k);
1294 /* copy the mbufs that were not processed beyond the good ones */
1295 if (k != n && k != 0)
1296 mbuf_bulk_copy(mb + k, dr, n - k);
1302 * process group of ESP outbound transport packets destined for
1303 * INLINE_CRYPTO type of device.
1306 inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
1307 struct rte_mbuf *mb[], uint16_t num)
1310 uint32_t i, k, n, l2, l3;
1313 struct rte_ipsec_sa *sa;
1314 union sym_op_data icv;
1315 uint64_t iv[IPSEC_MAX_IV_QWORD];
1316 struct rte_mbuf *dr[num];
1321 sqn = esn_outb_update_sqn(sa, &n);
1323 rte_errno = EOVERFLOW;
1326 for (i = 0; i != n; i++) {
1331 sqc = rte_cpu_to_be_64(sqn + i);
1334 /* try to update the packet itself */
1335 rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
1338 /* success, update mbuf fields */
1341 /* failure, put packet into the death-row */
1348 inline_outb_mbuf_prepare(ss, mb, k);
1350 /* copy the mbufs that were not processed beyond the good ones */
1351 if (k != n && k != 0)
1352 mbuf_bulk_copy(mb + k, dr, n - k);
1358 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1359 * actual processing is done by HW/PMD, just set flags and metadata.
1362 outb_inline_proto_process(const struct rte_ipsec_session *ss,
1363 struct rte_mbuf *mb[], uint16_t num)
1365 inline_outb_mbuf_prepare(ss, mb, num);
1370 * Select packet processing function for session on LOOKASIDE_NONE
1374 lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
1375 struct rte_ipsec_sa_pkt_func *pf)
1379 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
1380 RTE_IPSEC_SATP_MODE_MASK;
1383 switch (sa->type & msk) {
1384 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
1385 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
1386 pf->prepare = inb_pkt_prepare;
1387 pf->process = inb_tun_pkt_process;
1389 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
1390 pf->prepare = inb_pkt_prepare;
1391 pf->process = inb_trs_pkt_process;
1393 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
1394 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
1395 pf->prepare = outb_tun_prepare;
1396 pf->process = (sa->sqh_len != 0) ?
1397 outb_sqh_process : pkt_flag_process;
1399 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
1400 pf->prepare = outb_trs_prepare;
1401 pf->process = (sa->sqh_len != 0) ?
1402 outb_sqh_process : pkt_flag_process;
1412 * Select packet processing function for session on INLINE_CRYPTO
1416 inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
1417 struct rte_ipsec_sa_pkt_func *pf)
1421 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
1422 RTE_IPSEC_SATP_MODE_MASK;
1425 switch (sa->type & msk) {
1426 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
1427 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
1428 pf->process = inb_tun_pkt_process;
1430 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
1431 pf->process = inb_trs_pkt_process;
1433 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
1434 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
1435 pf->process = inline_outb_tun_pkt_process;
1437 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
1438 pf->process = inline_outb_trs_pkt_process;
1448 * Select packet processing function for given session based on SA parameters
1449 * and the type of device associated with the session.
1452 ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
1453 const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
1458 pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
1461 case RTE_SECURITY_ACTION_TYPE_NONE:
1462 rc = lksd_none_pkt_func_select(sa, pf);
1464 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1465 rc = inline_crypto_pkt_func_select(sa, pf);
1467 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1468 if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
1469 RTE_IPSEC_SATP_DIR_IB)
1470 pf->process = pkt_flag_process;
1472 pf->process = outb_inline_proto_process;
1474 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
1475 pf->prepare = lksd_proto_prepare;
1476 pf->process = pkt_flag_process;
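/*
 * Illustration only: the selection above is normally reached via
 * rte_ipsec_session_prepare(). A lookaside-none session would be set up
 * roughly as below; sa comes from rte_ipsec_sa_init() and crypto_ses is a
 * hypothetical, already configured rte_cryptodev_sym_session.
 */
#if 0	/* usage sketch */
	struct rte_ipsec_session ss;

	memset(&ss, 0, sizeof(ss));
	ss.sa = sa;
	ss.type = RTE_SECURITY_ACTION_TYPE_NONE;
	ss.crypto.ses = crypto_ses;
	if (rte_ipsec_session_prepare(&ss) != 0)
		return -EINVAL;		/* handle error */
#endif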