/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"

typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len);
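/*
 * Note: this function pointer type lets cpu_outb_pkt_prepare() below share
 * one prepare loop between tunnel and transport mode; outb_tun_pkt_prepare()
 * and outb_trs_pkt_prepare() are the two implementations passed in.
 */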
/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
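	/*
	 * Note: sa->ctp holds cipher/auth offsets and lengths precomputed at
	 * SA setup time, so the per-packet work below is just adding the
	 * packet-specific offset and payload length.
	 */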
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
/*
 * helper function to fill crypto_sym op for aead algorithms.
 * used by outb_cop_prepare(), see below.
 */
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
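	/*
	 * Note: the AAD scratch area is carved out of the mbuf tail right
	 * after the ICV bytes; the pkt_prepare() functions below check for
	 * sa->aad_len of extra tailroom for exactly that purpose.
	 */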
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)

	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;

	/* fill sym op fields */

	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
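		/*
		 * Note: for both AES-GCM and AES-CTR the per-packet IV /
		 * counter block is written into the crypto op private area
		 * at an offset configured at SA setup (elided above); the
		 * packet itself carries only the plain ESP IV.
		 */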
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)

	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))

	ph = rte_pktmbuf_prepend(mb, hlen - l2len);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
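	/*
	 * Resulting packet layout (tunnel mode), a sketch assuming the tail
	 * fits in the last segment:
	 *
	 * | tunnel hdr | esp hdr | iv | original packet | padding | esp tail |
	 * | sqh (optional ESN high bits) | icv |
	 *
	 * icv->va/pa point at the ICV area; AAD scratch space, if any,
	 * follows it in the reserved tailroom.
	 */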
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)

	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);

	/*
	 * fill AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
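		/*
		 * Note: per RFC 4106 the ESP AAD is the SPI plus the 32-bit
		 * SQN, or the SPI plus the full 64-bit ESN when extended
		 * sequence numbers are in use; aead_gcm_aad_fill() picks the
		 * variant based on IS_ESN(sa).
		 */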
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
		/* success, setup crypto op */
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
		/* failure, put packet into the death-row */
	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
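/*
 * Illustrative usage (not part of the library): with a lookaside-none
 * session an application typically does something like
 *
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n);
 *	rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
 *
 * and later, after dequeueing the completed crypto ops, finishes with
 * rte_ipsec_pkt_process() on the corresponding mbufs. For outbound tunnel
 * SAs rte_ipsec_pkt_crypto_prepare() dispatches to esp_outb_tun_prepare()
 * above. 'dev_id'/'qid' stand for the application's cryptodev and queue pair.
 */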
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)

	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	uint32_t l2len, l3len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
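	/*
	 * Resulting packet layout (transport mode), a sketch assuming the
	 * tail fits in the last segment:
	 *
	 * | l2/l3 hdr | esp hdr | iv | l4 payload | padding | esp tail |
	 * | sqh (optional ESN high bits) | icv |
	 *
	 * insert_esph() above shifted the original l2/l3 headers to the new
	 * packet start to make room for the ESP header and IV.
	 */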
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	uint32_t i, k, n, l2, l3;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
		/* success, setup crypto op */
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
		/* failure, put packet into the death-row */
	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)

	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
	case ALGO_TYPE_AES_CTR:
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num,
	esp_outb_prepare_t prepare, uint32_t cofs_mask)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);

		/* success, proceed with preparations */
			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,

			/* fill iv, digest and aad */
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
	/* convert mbufs to iovecs and do actual crypto/auth processing */
	cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,

cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
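/*
 * Illustrative usage (not part of the library): for a CPU-crypto session the
 * application calls rte_ipsec_pkt_cpu_prepare(&ss, mb, n), which dispatches
 * here via the SA function table; the cipher/auth work is done synchronously
 * inside cpu_crypto_bulk(), so no cryptodev enqueue/dequeue round-trip is
 * needed afterwards.
 */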
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	uint32_t i, k, icv_len, *icv;
	struct rte_ipsec_sa *sa;

	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
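			/*
			 * Tail layout change done above (sketch):
			 * before: ... | esp tail | sqh (ESN hi) | icv |
			 * after:  ... | esp tail | icv |
			 * i.e. the ICV is copied back over the temporary
			 * SQN.hi bytes and pkt_len/data_len shrink by sqh_len.
			 */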
	/* handle unprocessed mbufs */
			move_bad_mbufs(mb, dr, num, num - k);
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;

			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		/* failure, put packet into the death-row */
	/* move mbufs that were not processed beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
	inline_outb_mbuf_prepare(ss, mb, k);
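/*
 * Illustrative usage (not part of the library): with an inline-crypto
 * session the application only has to do
 *
 *	k = rte_ipsec_pkt_process(&ss, mb, n);
 *	rte_eth_tx_burst(port_id, queue_id, mb, k);
 *
 * rte_ipsec_pkt_process() dispatches here via the SA function table and the
 * ESP encryption itself is performed by the inline-crypto capable NIC on
 * transmit. 'port_id'/'queue_id' stand for the application's TX port/queue.
 */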
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		/* failure, put packet into the death-row */
	/* move mbufs that were not processed beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
	inline_outb_mbuf_prepare(ss, mb, k);

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	inline_outb_mbuf_prepare(ss, mb, num);