/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
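	/*
	 * Note: sa->ctp holds per-SA cipher/auth offset and length templates
	 * precomputed at SA initialization time; here they are only adjusted
	 * by the packet specific offset (pofs) and payload length (plen).
	 */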
/*
 * helper function to fill crypto_sym op for aead algorithms.
 * used by outb_cop_prepare(), see below.
 */
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
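	/*
	 * The AAD block is expected to sit right behind the ICV in the mbuf
	 * tailroom; for the lookaside-none path it is written there by
	 * outb_pkt_xprepare(), see below.
	 */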
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;

	/* fill sym op fields */
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
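		/*
		 * For both the AES-GCM and AES-CTR cases above, the
		 * per-packet IV is combined with the 32-bit salt stored in
		 * the SA to build the nonce/counter block that the crypto
		 * device reads from inside the crypto op.
		 */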
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))

	ph = rte_pktmbuf_prepend(mb, hlen - l2len);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
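	/*
	 * Resulting outbound tunnel packet layout (before the actual
	 * encryption takes place), roughly:
	 *
	 *  [outer hdr][ESP hdr][IV][original packet][pad][ESP tail]
	 *  [SQN.hi (ESN only)][ICV]
	 *
	 * The optional 4-byte SQN.hi slot is reserved so that the high bits
	 * of the sequence number can be covered by the digest; it is removed
	 * again by esp_outb_sqh_process() once the crypto operation is done.
	 */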
/*
 * for pure cryptodev (lookaside none), depending on SA settings,
 * we might have to write some extra data into the packet.
 */
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
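	/*
	 * Note that the AAD block written here lives in the mbuf tailroom
	 * behind the ICV: it is consumed only by the crypto device and is
	 * not accounted for in the packet length, so it never shows up in
	 * the transmitted ESP packet.
	 */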
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,

		/* success, setup crypto op */
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);

		/* failure, put packet into the death-row */

	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
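/*
 * Typical lookaside (none) usage as seen from the application side, a rough
 * sketch only: dev_id/qid, the burst sizes and the grp[] array are
 * application specific and error handling is omitted.
 *
 *	n = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
 *	n = rte_cryptodev_enqueue_burst(dev_id, qid, cop, n);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, RTE_DIM(cop));
 *	n = rte_ipsec_pkt_crypto_group(cop, mb, grp, n);
 *	... then rte_ipsec_pkt_process() per resulting group ...
 *
 * esp_outb_tun_prepare() itself is not called directly by applications:
 * rte_ipsec_session_prepare() installs it as the session prepare callback
 * for lookaside-none outbound tunnel SAs.
 */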
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv,
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
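	/*
	 * Resulting outbound transport packet layout (before the actual
	 * encryption takes place), roughly:
	 *
	 *  [L2][L3][ESP hdr][IV][payload][pad][ESP tail][SQN.hi (ESN only)][ICV]
	 *
	 * i.e. the original L2/L3 headers are shifted towards the new packet
	 * start by insert_esph(), and the ESP header plus IV is placed right
	 * behind the L3 header.
	 */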
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
	uint32_t i, k, n, l2, l3;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv,

		/* success, setup crypto op */
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);

		/* failure, put packet into the death-row */

	/* move mbufs that were not prepared beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);
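	/*
	 * Unlike the tunnel case, the per-packet L2/L3 header lengths are
	 * passed down here and outb_cop_prepare() gets hlen = l2 + l3, so
	 * the cipher/auth data regions start past the unencrypted headers.
	 */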
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * in digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint32_t i, k, icv_len, *icv;
	struct rte_ipsec_sa *sa;

	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);

			/* remove high-order 32 bits of ESN from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
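			/*
			 * remove_sqh() copies the ICV over the no longer
			 * needed 4-byte SQN.hi slot, so the packet again ends
			 * with ESP trailer + ICV, as it must appear on the
			 * wire.
			 */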
	/* handle unprocessed mbufs */
		move_bad_mbufs(mb, dr, num, num - k);
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
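		/*
		 * rte_security_set_pkt_metadata() is invoked only when the
		 * security PMD advertises RTE_SECURITY_TX_OLOAD_NEED_MDATA
		 * in the session ol_flags; otherwise setting
		 * PKT_TX_SEC_OFFLOAD is sufficient.
		 */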
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		/* failure, put packet into the death-row */

	/* move mbufs that were not processed beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
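	/*
	 * For INLINE_CRYPTO the ESP encapsulation is done here in software
	 * (note sqh_len == 0: no SQN.hi slot is reserved and no crypto ops
	 * are created), while encryption and ICV generation are left to the
	 * NIC on transmit.
	 */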
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
	uint32_t i, k, n, l2, l3;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];

	sqn = esn_outb_update_sqn(sa, &n);
		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],

		/* failure, put packet into the death-row */

	/* move mbufs that were not processed beyond the good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
	inline_outb_mbuf_prepare(ss, mb, num);