/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"
/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};
/*
 * helper routine, fills internal crypto_xform structure.
 */
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	xfn = xf->next;

	/* for AEAD just one xform required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		xform->aead = &xf->aead;
	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;
	} else {
		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}
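/*
 * Example (illustrative sketch only, not taken from the library itself):
 * for an inbound CIPHER+AUTH SA an application would chain the xforms in
 * AUTH+CIPHER order before calling rte_ipsec_sa_init(), e.g.:
 *
 *	struct rte_crypto_sym_xform auth_xf = { 0 }, ciph_xf = { 0 };
 *
 *	auth_xf.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xf.next = &ciph_xf;
 *	ciph_xf.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	ciph_xf.next = NULL;
 *	prm.crypto_xform = &auth_xf;
 *
 * For an outbound SA the order is reversed (CIPHER first, then AUTH);
 * for AEAD a single xform with no next element is expected.
 */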
uint64_t __rte_experimental
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		/*
		 * RFC 4303 recommends 64 as minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we have at least a 64-entry window when ESN
		 * is enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);

		n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
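/*
 * The value computed above is the total size of the SA object:
 * struct rte_ipsec_sa itself, followed (for inbound SAs with a non-zero
 * replay window) by one replay_sqn area sized for nb_bucket buckets,
 * or by two such areas when RTE_IPSEC_SATP_SQN_ATOM is set.
 * fill_sa_replay() below wires the rsn[] pointers into that trailing space.
 */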
void __rte_experimental
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}
/*
 * Determine expected SA type based on input parameters.
 */
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
	}

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;
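/*
 * Worked example: for an inbound ESP SA in IPv4 tunnel mode
 * (tun.next_proto == IPPROTO_IPIP) with ESN disabled and without the
 * RTE_IPSEC_SAFLAG_SQN_ATOM flag, the resulting type is:
 *	RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_IB |
 *	RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
 *	RTE_IPSEC_SATP_ESN_DISABLE | RTE_IPSEC_SATP_SQN_RAW
 */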
/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may differ with new algorithms support */
	sa->ctp.auth.offset = 0;
	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
	sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
}
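/*
 * Note: sa->ctp holds per-SA baseline offsets/lengths only;
 * the actual per-packet values are derived from them (together with the
 * per-packet header and payload lengths) in esp_inb_tun_cop_prepare()
 * and esp_outb_cop_prepare() below.
 */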
/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}

/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
{
	sa->sqn.outb.raw = 1;

	/* these params may differ with new algorithms support */
	sa->ctp.auth.offset = hlen;
	sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
	if (sa->aad_len != 0) {
		sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
	} else {
		sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
	}
}

/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;
	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);

	esp_outb_init(sa, sa->hdr_len);
}
/*
 * helper function, init SA structure.
 */
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	if (cxf->aead != NULL) {
		if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
			return -EINVAL;
		sa->aad_len = sizeof(struct aead_gcm_aad);
		sa->icv_len = cxf->aead->digest_length;
		sa->iv_ofs = cxf->aead->iv.offset;
		sa->iv_len = sizeof(uint64_t);
		sa->pad_align = IPSEC_PAD_AES_GCM;
	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;
		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
		if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
			sa->pad_align = IPSEC_PAD_NULL;
		} else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
		}
	}

	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0);
		break;
	}
/*
 * helper function, init SA replay structure.
 */
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
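/*
 * rsn[0] lives right after the SA itself; for RTE_IPSEC_SATP_SQN_ATOM a
 * second copy (rsn[1]) follows it, the extra space being accounted for by
 * ipsec_sa_size() above. The copies are handed out to the inbound path
 * through the rsn_acquire()/rsn_update_start() helpers used below.
 */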
int __rte_experimental
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}
int __rte_experimental
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			prm->tun.hdr_len > sizeof(sa->hdr))
		return -EINVAL;

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0)
		rte_ipsec_sa_fini(sa);

	/* fill replay window related fields */
	fill_sa_replay(sa, wsz, nb);
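/*
 * Typical usage (illustrative sketch only, error handling trimmed; the
 * allocator choice is up to the application):
 *
 *	struct rte_ipsec_sa_prm prm;
 *	struct rte_ipsec_sa *sa;
 *	int32_t sz;
 *
 *	... fill prm (ipsec_xform, crypto_xform, tun/trs params) ...
 *
 *	sz = rte_ipsec_sa_size(&prm);
 *	if (sz > 0) {
 *		sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *		if (sa != NULL && rte_ipsec_sa_init(sa, &prm, sz) < 0) {
 *			rte_ipsec_sa_fini(sa);
 *			rte_free(sa);
 *		}
 *	}
 */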
mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[],
	uint16_t num)
	for (i = 0; i != num; i++)
		dst[i] = src[i];
/*
 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
 */
lksd_none_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)

	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
		__rte_crypto_sym_op_attach_sym_session(sop, ss->crypto.ses);
	}
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
esp_outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)

	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;

	/* fill sym op fields */

	/* AEAD (AES_GCM) case */
	if (sa->aad_len != 0) {
		sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
		sop->aead.data.length = sa->ctp.cipher.length + plen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
	/* CRYPT+AUTH case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv)

	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	plen = mb->pkt_len - mb->l2_len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	tlen = pdlen + sa->icv_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))

	ph = rte_pktmbuf_prepend(mb, hlen - l2len);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->l2_len = sa->hdr_l3_off;
	mb->l3_len = sa->hdr_len - sa->hdr_l3_off;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
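/*
 * Layout of the packet produced above (lookaside-none case):
 *
 *	[tunnel header (sa->hdr)][ESP header][IV][payload]
 *	[padding][ESP tail][SQN.hi (ESN only)][ICV][AAD (AEAD only)]
 *
 * SQN.hi and AAD occupy the extra tailroom checked for above and are
 * consumed by the crypto device only; see outb_pkt_xprepare() below.
 */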
/*
 * for pure cryptodev (lookaside none), depending on SA settings,
 * we might have to write some extra data to the packet.
 */
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)

	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (AAD fields are placed after the ICV),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);
	rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, setup crypto op */
		outb_pkt_xprepare(sa, sqc, &icv);
		esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);

		/* failure, put packet into the death-row */

	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);
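/*
 * Note: mbufs for which preparation failed are collected into dr[]
 * (the "death row") and copied back into mb[] right after the k
 * successfully prepared ones, so on return the first k entries of
 * mb[]/cop[] are ready to be enqueued and the remainder are the
 * failed packets.
 */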
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)

	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	tlen = pdlen + sa->icv_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	uint32_t i, k, n, l2, l3;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);
	rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],

		/* success, setup crypto op */
		outb_pkt_xprepare(sa, sqc, &icv);
		esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);

		/* failure, put packet into the death-row */

	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);
/*
 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
 */
static inline int32_t
esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)

	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;

	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)

	/* fill sym op fields */

	/* AEAD (AES_GCM) case */
	if (sa->aad_len != 0) {
		sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
		sop->aead.data.length = clen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
	/* CRYPT+AUTH case */
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		copy_iv(ivc, ivp, sa->iv_len);
/*
 * for pure cryptodev (lookaside none), depending on SA settings,
 * we might have to write some extra data to the packet.
 */
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)

	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (AAD fields are placed after the ICV),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
/*
 * setup/update packet data and metadata for ESP inbound tunnel case.
 */
static inline int32_t
esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
	const struct replay_sqn *rsn, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)

	uint32_t icv_ofs, plen;
	struct esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct SQN, then check it, then
	 * convert it back into network byte order.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
	rc = esn_inb_check_sqn(rsn, sa, sqn);
	sqn = rte_cpu_to_be_64(sqn);

	/* start packet manipulation */
	ml = rte_pktmbuf_lastseg(mb);
	icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;

	/*
	 * we have to allocate space for AAD somewhere,
	 * right now just use the free trailing space of the last segment.
	 * It would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (for the IV such space is already reserved inside the cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	inb_pkt_xprepare(sa, sqn, icv);
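/*
 * At this point icv->va/icv->pa point at the ICV location inside the
 * last segment, and inb_pkt_xprepare() above has already written the
 * SQN.hi word and/or the AAD around it when the SA requires them.
 */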
/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	struct rte_ipsec_sa *sa;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_mbuf *dr[num];

	rsn = rsn_acquire(sa);

	for (i = 0; i != num; i++) {
		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,

	rsn_release(sa, rsn);

	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not prepared mbufs beyond good ones */
	if (k != num && k != 0)
		mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)

	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		__rte_security_attach_session(sop, ss->security.ses);
	}
/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW only has to prepare the crypto op.
 */
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)

	lksd_proto_cop_prepare(ss, mb, cop, num);
/*
 * process ESP inbound tunnel packet.
 */
esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,

	uint32_t hlen, icv_len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/*
	 * check padding and next proto.
	 * return an error if something is wrong.
	 */
	pd = (char *)espt - espt->pad_len;
	if (espt->next_proto != sa->proto ||
			memcmp(pd, esp_pad_bytes, espt->pad_len))

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* cut off L2/L3 headers, ESP header and IV */
	hlen = mb->l2_len + mb->l3_len;
	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
	rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);

	/* retrieve SQN for later check */
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
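/*
 * On success the mbuf now starts with the inner (tunneled) packet:
 * outer L2/L3 headers, ESP header, IV, padding, ESP tail and ICV have
 * all been stripped, and the extracted SQN is reported back for the
 * replay-window update performed later in esp_inb_rsn_update().
 */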
/*
 * process ESP inbound transport packet.
 */
esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,

	uint32_t hlen, icv_len, l2len, l3len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/* check padding, return an error if something is wrong. */
	pd = (char *)espt - espt->pad_len;
	if (memcmp(pd, esp_pad_bytes, espt->pad_len))

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* retrieve SQN for later check */
	hlen = l2len + l3len;
	op = rte_pktmbuf_mtod(mb, char *);
	esph = (struct esp_hdr *)(op + hlen);
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* cut off ESP header and IV, update L3 header */
	np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
	remove_esph(np, op, hlen);
	update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,

	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
/*
 * for group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num)

	struct replay_sqn *rsn;

	rsn = rsn_update_start(sa);

	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)

	rsn_update_finish(sa, rsn);
/*
 * process group of ESP inbound tunnel packets.
 */
inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	/* process packets, extract seq numbers */
	for (i = 0; i != num; i++) {
		if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
		/* bad packet, will be dropped from further processing */

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */
	rte_errno = EBADMSG;
	mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * process group of ESP inbound transport packets.
 */
inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	/* process packets, extract seq numbers */
	for (i = 0; i != num; i++) {
		if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
		/* bad packet, will be dropped from further processing */

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */
	rte_errno = EBADMSG;
	mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hi bits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
		}
	}

	/* handle unprocessed mbufs */
	rte_errno = EBADMSG;
	mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD,
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	struct rte_mbuf *dr[num];

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)

	/* handle unprocessed mbufs */
	rte_errno = EBADMSG;
	mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {
		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		rte_security_set_pkt_metadata(ss->security.ctx,
			ss->security.ses, mb[i], NULL);
	}
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);
	rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, update mbuf fields */

		/* failure, put packet into the death-row */

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	uint32_t i, k, n, l2, l3;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);
	rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],

		/* success, update mbuf fields */

		/* failure, put packet into the death-row */

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
outb_inline_proto_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	inline_outb_mbuf_prepare(ss, mb, num);
/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;
		break;
	}
/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	}
/*
 * Select packet processing function for the given session, based on SA
 * parameters and the type of device associated with the session.
 */
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)

	pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };

	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = outb_inline_proto_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare = lksd_proto_prepare;
		pf->process = pkt_flag_process;