/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"

/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};

/*
 * helper routine, fills internal crypto_xform structure.
 */
static int
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	xfn = xf->next;

	/* for AEAD just one xform is required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		xform->aead = &xf->aead;

	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;

	} else {

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}
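
/*
 * Illustrative sketch (not part of the build): the xform chain an
 * application could pass via prm->crypto_xform for an inbound SA.
 * The algorithm choices here are assumptions made for the example only.
 *
 *	static struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.next = NULL,
 *	};
 *	static struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.next = &cipher_xf,
 *	};
 *
 *	prm.crypto_xform = &auth_xf;	(AUTH first, CIPHER second: inbound)
 */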

uint64_t __rte_experimental
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
	return sa->type;
}

static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	uint32_t n, sz, wsz;

	wsz = *wnd_sz;
	n = 0;

	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we have a window of at least 64 when ESN is
		 * enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
		if (wsz != 0)
			n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	*wnd_sz = wsz;
	*nb_bucket = n;

	sz = rsn_size(n);
	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
	return sz;
}
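
/*
 * Worked sizing sketch (an assumption-level illustration, actual numbers
 * depend on the struct layouts): for an inbound SA with a requested window
 * the window is rounded up to whole 64-bit buckets, rsn_size() yields the
 * replay_sqn structure covering those buckets, the result is doubled
 * (REPLAY_SQN_NUM copies) when RTE_IPSEC_SATP_SQN_ATOM is set, and
 * sizeof(struct rte_ipsec_sa) is added on top of that.
 */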

void __rte_experimental
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}

/*
 * Determine expected SA type based on input parameters.
 */
static int
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	uint64_t tp;

	tp = 0;

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;
	else
		return -EINVAL;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;
	else
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
		else
			return -EINVAL;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else
		return -EINVAL;

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;

	*type = tp;
	return 0;
}
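
/*
 * Example (illustrative): an outbound ESP IPv4 tunnel SA carrying IPv4
 * traffic, with ESN on and no RTE_IPSEC_SAFLAG_SQN_ATOM flag, ends up with:
 *	RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_OB |
 *	RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
 *	RTE_IPSEC_SATP_ESN_ENABLE | RTE_IPSEC_SATP_SQN_RAW
 */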

/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may differ once new algorithms are supported */
	sa->ctp.auth.offset = 0;
	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
	sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
}

/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}

/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
{
	uint8_t algo_type;

	sa->sqn.outb.raw = 1;

	/* these params may differ once new algorithms are supported */
	sa->ctp.auth.offset = hlen;
	sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;

	algo_type = sa->algo_type;

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CTR:
		sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
		break;
	}
}
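
/*
 * Resulting outbound packet layout that the offsets above describe
 * (tunnel case, hlen = tunnel header length; a sketch per RFC 4303):
 *
 *	[ tunnel hdr ][ esp hdr ][ IV ][ payload ][ pad ][ esp tail ][ ICV ]
 *
 * The auth region starts at hlen and covers the ESP header, IV and payload
 * (plus SQN.hi for ESN). For CBC-style ciphers the cipher region starts at
 * the IV; for AES-GCM/CTR it starts right after the IV, since the counter
 * block is built from the per-packet IV instead.
 */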

/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;
	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);

	esp_outb_init(sa, sa->hdr_len);
}

/*
 * helper function, init SA structure.
 */
static int
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;
			break;
		default:
			return -EINVAL;
		}
	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;
		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->iv_len = 0;
			sa->algo_type = ALGO_TYPE_NULL;
			break;
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;
			break;
		case RTE_CRYPTO_CIPHER_3DES_CBC:
			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;
			break;
		default:
			return -EINVAL;
		}
	}

	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0);
		break;
	}

	return 0;
}

/*
 * helper function, init SA replay structure.
 */
static void
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
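
/*
 * Resulting memory layout (sketch):
 *
 *	[ struct rte_ipsec_sa ][ replay_sqn #0 ][ replay_sqn #1 ]
 *
 * The second replay_sqn copy is present only for RTE_IPSEC_SATP_SQN_ATOM,
 * where it serves as the shadow copy used by the rsn_update_start()/
 * rsn_update_finish() scheme.
 */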

int __rte_experimental
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	uint64_t type;
	uint32_t nb, wsz;
	int32_t rc;

	if (prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}

int __rte_experimental
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	int32_t rc, sz;
	uint32_t nb, wsz;
	uint64_t type;
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only ESP is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			prm->tun.hdr_len > sizeof(sa->hdr))
		return -EINVAL;

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	sa->type = type;
	sa->size = sz;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0)
		rte_ipsec_sa_fini(sa);

	/* fill replay window related fields */
	if (nb != 0)
		fill_sa_replay(sa, wsz, nb);

	return sz;
}
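
/*
 * Illustrative usage sketch (not from this file; error handling trimmed,
 * and the rte_zmalloc() alignment choice is just an assumption):
 *
 *	int sz = rte_ipsec_sa_size(&prm);
 *	struct rte_ipsec_sa *sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	if (sa == NULL || rte_ipsec_sa_init(sa, &prm, sz) < 0)
 *		... handle error ...
 *	...
 *	rte_ipsec_sa_fini(sa);
 *	rte_free(sa);
 */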

static inline void
mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[],
	uint16_t num)
{
	uint32_t i;

	for (i = 0; i != num; i++)
		dst[i] = src[i];
}

/*
 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
 */
static inline void
lksd_none_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
		sop->m_src = mb[i];
		__rte_crypto_sym_op_attach_sym_session(sop, ss->crypto.ses);
	}
}

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
esp_outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint8_t algo_type = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo_type) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
		sop->aead.data.length = sa->ctp.cipher.length + plen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - mb->l2_len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;
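
	/*
	 * Worked example (illustrative): plen = 100 and pad_align = 16
	 * (AES-CBC) give clen = RTE_ALIGN_CEIL(100 + 2, 16) = 112 bytes to
	 * encrypt, pdlen = 12 (10 pad bytes plus the 2-byte esp tail), and
	 * tlen = pdlen + icv_len bytes appended to the last segment.
	 */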

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->l2_len = sa->hdr_l3_off;
	mb->l3_len = sa->hdr_len - sa->hdr_l3_off;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
		sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *aad;
	uint8_t algo_type = sa->algo_type;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (AAD fields are placed after ICV);
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (algo_type == ALGO_TYPE_AES_GCM) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}
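
/*
 * Trailing bytes of the prepared packet then look like (sketch):
 *
 *	[ pad ][ esp tail ][ SQN.hi ][ ICV ][ AAD ]
 *
 * where SQN.hi is present only for ESN and the AAD copy only for AES-GCM;
 * both are scratch areas consumed by the crypto operation rather than
 * data transmitted on the wire.
 */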

/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
static uint16_t
outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, setup crypto op */
		if (rc >= 0) {
			mb[k] = mb[i];
			outb_pkt_xprepare(sa, sqc, &icv);
			esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	/* update cops */
	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not yet prepared mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}

/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
		IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
static uint16_t
outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
			l2, l3, &icv);

		/* success, setup crypto op */
		if (rc >= 0) {
			mb[k] = mb[i];
			outb_pkt_xprepare(sa, sqc, &icv);
			esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	/* update cops */
	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not yet prepared mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}

/*
 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
 */
static inline int32_t
esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;
	uint32_t clen;
	uint8_t algo_type = sa->algo_type;

	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EINVAL;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
		sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
		sop->aead.data.length = clen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
		break;
	}

	return 0;
}

/*
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (AAD fields are placed after ICV);
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}

/*
 * setup/update packet data and metadata for ESP inbound tunnel case.
 */
static inline int32_t
esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
	const struct replay_sqn *rsn, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)
{
	int32_t rc;
	uint64_t sqn;
	uint32_t icv_ofs, plen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct SQN, then check it, then
	 * convert it back into network byte order.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

	rc = esn_inb_check_sqn(rsn, sa, sqn);
	if (rc != 0)
		return rc;

	sqn = rte_cpu_to_be_64(sqn);
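
	/*
	 * Worked example (illustrative, per RFC 4303 Appendix A): if the
	 * highest sequence number seen so far is 0x1_FFFF_FFF0 and a packet
	 * with low32 = 5 arrives, 5 lies below the window under 0xFFFF_FFF0,
	 * so the reconstruction picks SQN.hi = 2 and yields 0x2_0000_0005.
	 */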

	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	ml = rte_pktmbuf_lastseg(mb);
	icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;

	/*
	 * we have to allocate space for AAD somewhere;
	 * right now just use free trailing space at the last segment.
	 * It would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (as is already done for the IV, whose space is reserved inside
	 * the cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	inb_pkt_xprepare(sa, sqn, icv);
	return plen;
}

/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
static uint16_t
inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_mbuf *dr[num];

	sa = ss->sa;
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0)
			rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,
				hl, rc);

		if (rc == 0)
			mb[k++] = mb[i];
		else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	rsn_release(sa, rsn);

	/* update cops */
	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not yet prepared mbufs beyond the good ones */
	if (k != num && k != 0)
		mbuf_bulk_copy(mb + k, dr, num - k);

	return k;
}

/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
static inline void
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		sop->m_src = mb[i];
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW has only to prepare the crypto op.
 */
static uint16_t
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	lksd_proto_cop_prepare(ss, mb, cop, num);
	return num;
}

/*
 * process ESP inbound tunnel packet.
 */
static inline int
esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *sqn)
{
	uint32_t hlen, icv_len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;
	char *pd;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -EBADMSG;

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/*
	 * check padding and next proto;
	 * return an error if something is wrong.
	 */
	pd = (char *)espt - espt->pad_len;
	if (espt->next_proto != sa->proto ||
			memcmp(pd, esp_pad_bytes, espt->pad_len))
		return -EINVAL;

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* cut off L2/L3 headers, ESP header and IV */
	hlen = mb->l2_len + mb->l3_len;
	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
	rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);

	/* retrieve SQN for later check */
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
	return 0;
}

/*
 * process ESP inbound transport packet.
 */
static inline int
esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *sqn)
{
	uint32_t hlen, icv_len, l2len, l3len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;
	char *np, *op, *pd;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -EBADMSG;

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/* check padding, return an error if something is wrong. */
	pd = (char *)espt - espt->pad_len;
	if (memcmp(pd, esp_pad_bytes, espt->pad_len))
		return -EINVAL;

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* retrieve SQN for later check */
	l2len = mb->l2_len;
	l3len = mb->l3_len;
	hlen = l2len + l3len;
	op = rte_pktmbuf_mtod(mb, char *);
	esph = (struct esp_hdr *)(op + hlen);
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* cut off ESP header and IV, update L3 header */
	np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
	remove_esph(np, op, hlen);
	update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
		espt->next_proto);

	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
	return 0;
}

/*
 * for a group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num)
{
	uint32_t i, k;
	struct replay_sqn *rsn;

	rsn = rsn_update_start(sa);

	k = 0;
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
			mb[k++] = mb[i];
		else
			dr[i - k] = mb[i];
	}

	rsn_update_finish(sa, rsn);
	return k;
}

/*
 * process group of ESP inbound tunnel packets.
 */
static uint16_t
inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k;
	struct rte_ipsec_sa *sa;
	uint32_t sqn[num];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	/* process packets, extract seq numbers */

	k = 0;
	for (i = 0; i != num; i++) {
		/* good packet */
		if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
			mb[k++] = mb[i];
		/* bad packet, will be dropped from further processing */
		else
			dr[i - k] = mb[i];
	}

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}

/*
 * process group of ESP inbound transport packets.
 */
static uint16_t
inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k;
	uint32_t sqn[num];
	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	/* process packets, extract seq numbers */

	k = 0;
	for (i = 0; i != num; i++) {
		/* good packet */
		if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
			mb[k++] = mb[i];
		/* bad packet, will be dropped from further processing */
		else
			dr[i - k] = mb[i];
	}

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hi bits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
static uint16_t
outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			mb[k++] = mb[i];
		} else
			dr[i - k] = mb[i];
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}

/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD;
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
static uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k;
	struct rte_mbuf *dr[num];

	RTE_SET_USED(ss);

	k = 0;
	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
			mb[k++] = mb[i];
		else
			dr[i - k] = mb[i];
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
}

/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
static uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, update mbuf fields */
		if (rc >= 0)
			mb[k++] = mb[i];
		/* failure, put packet into the death-row */
		else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy not yet processed mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
static uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
			l2, l3, &icv);

		/* success, update mbuf fields */
		if (rc >= 0)
			mb[k++] = mb[i];
		/* failure, put packet into the death-row */
		else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy not yet processed mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD; just set flags and metadata.
 */
static uint16_t
outb_inline_proto_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}

/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
static int
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
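
/*
 * Summary of the selection above (informal): inbound SAs pair
 * inb_pkt_prepare with inb_{tun,trs}_pkt_process; outbound SAs pair
 * outb_{tun,trs}_prepare with outb_sqh_process when the ESN SQN.hi
 * bytes must be stripped after the crypto op completes, or with the
 * lightweight pkt_flag_process otherwise.
 */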

/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
static int
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select packet processing functions for given session based on SA
 * parameters and the type of device associated with the session.
 */
int
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	rc = 0;
	pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };

	switch (ss->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = outb_inline_proto_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare = lksd_proto_prepare;
		pf->process = pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
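
/*
 * Illustrative call flow (a sketch, not code from this file): an
 * application binds the SA to a session and lets the library pick the
 * handlers selected above.
 *
 *	struct rte_ipsec_session ss = { .sa = sa,
 *		.type = RTE_SECURITY_ACTION_TYPE_NONE, ... };
 *	rte_ipsec_session_prepare(&ss);
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, num);
 *	... enqueue/dequeue cops to/from the crypto device ...
 *	k = rte_ipsec_pkt_process(&ss, mb, num);
 */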