/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"
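/*
 * Common prototype shared by the tunnel and transport packet-prepare helpers
 * (outb_tun_pkt_prepare/outb_trs_pkt_prepare), so that the generic
 * cpu_outb_pkt_prepare() routine below can be parameterized with either one.
 */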
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
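/*
 * sa->ctp holds the cipher/auth data offsets and lengths precomputed at SA
 * setup time; pofs/plen supply the per-packet header offset and protected
 * data length on top of them.
 */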
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}
/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
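/*
 * Note: both the digest and the AAD live in the mbuf tailroom reserved by
 * the *_pkt_prepare() routines: the AAD area starts right after the ICV and
 * is filled later by outb_pkt_xprepare().
 */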
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;

	/* fill sym op fields */
	sop = cop->sym;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
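/*
 * Layout of an outbound ESP tunnel packet produced by the routines below
 * (the high-order SQN word is present only while the digest is computed
 * for ESN-enabled SAs and is removed afterwards by esp_outb_sqh_process()):
 *
 * | tun hdr | (UDP) | ESP hdr | IV | inner pkt | pad | ESP tail | (SQN.hi) | ICV |
 */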
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
			sa->hdr_len + sizeof(struct rte_udp_hdr));
	}

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * currently supported AEAD algorithms: AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {
		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
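/*
 * Layout of an outbound ESP transport packet produced by the routine below:
 * the original L2/L3 headers stay in place and the ESP header plus IV are
 * inserted between them and the L4 payload:
 *
 * | L2 | L3 | ESP hdr | IV | L4 payload | pad | ESP tail | (SQN.hi) | ICV |
 */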
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
		l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i, k, n, l2, l3;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {
		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
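/*
 * setup IV and compute the encrypted data offset and length for
 * CPU crypto (RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) processing.
 */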
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}
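/*
 * Common part of the CPU-crypto (synchronous) outbound path: update SQN,
 * prepare each packet with the given prepare() callback, collect per-packet
 * IV/AAD/digest pointers and hand the whole burst to cpu_crypto_bulk().
 */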
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num,
	esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {
		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);

		/* success, proceed with preparations */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}
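/*
 * CPU crypto (synchronous) outbound prepare for tunnel mode: the ESP header
 * offset is constant and already accounted for in sa->ctp, so the per-packet
 * offset mask is zero.
 */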
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}
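/*
 * CPU crypto (synchronous) outbound prepare for transport mode: here the ESP
 * header offset depends on the per-packet L2/L3 header length, so the full
 * (l2 + l3) offset is passed through.
 */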
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require the high-order SQN bits to be implicitly
 * included into the digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
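/*
 * Concretely: the 4-byte SQN.hi word that was temporarily placed between the
 * ESP trailer and the ICV for digest computation is stripped here, and
 * pkt_len/data_len are shrunk back by sa->sqh_len.
 */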
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_ipsec_sa *sa;

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}

	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
		move_bad_mbufs(mb, dr, num, num - k);
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	bytes = 0;
	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {
		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {
		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* move mbufs that were not processed beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {
		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* move mbufs that were not processed beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}