/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
		sop->aead.data.length = sa->ctp.cipher.length + plen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
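/*
 * Illustrative note (not from the original source): hlen/plen let the
 * SA-constant crypto offsets in sa->ctp serve every packet. A
 * lookaside caller first grows the packet, then feeds the returned
 * encrypted length back in as plen:
 *
 *	rc = outb_tun_pkt_prepare(sa, sqc, iv, mb, &icv);
 *	if (rc >= 0)
 *		outb_cop_prepare(cop, sa, iv, &icv, 0, rc);
 *
 * Tunnel mode passes hlen == 0, transport mode hlen == l2_len + l3_len.
 */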
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
			sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
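/*
 * Worked example (illustrative figures, not from the original source):
 * a 1400 byte IPv4 packet with l2_len == 14 on an AES-CBC SA with
 * pad_align == 16 and icv_len == 16:
 *
 *	plen  = 1400 - 14 = 1386
 *	clen  = RTE_ALIGN_CEIL(1386 + 2, 16) = 1392
 *	pdlen = 1392 - 1386 = 6 (4 pad bytes + 2 byte ESP trailer)
 *	tlen  = 6 + 16 = 22 bytes appended to the last segment
 */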
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}
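/*
 * Resulting tail layout for lookaside-none (sketch, assuming AES-GCM
 * with ESN enabled):
 *
 *	... payload | pad | esp tail | sqn.hi (4B) | ICV | AAD
 *
 * sqn.hi is authenticated together with the packet, while the AAD copy
 * and the last sqh_len bytes of the ICV sit in the tailroom beyond
 * data_len; the tailroom check in *_pkt_prepare() reserves that space.
 * sqn.hi is removed again by esp_outb_sqh_process() once the crypto op
 * completes.
 */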
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
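/*
 * Usage sketch (illustrative, not part of this file): the prepare
 * functions back rte_ipsec_pkt_crypto_prepare() for lookaside-none
 * sessions, and a typical caller drives the cryptodev explicitly:
 *
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, n);
 *	(regroup cop[] back to mb[], e.g. with rte_ipsec_pkt_crypto_group)
 *	n = rte_ipsec_pkt_process(ss, mb, n);
 */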
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
			IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
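/*
 * Before/after layout for the transport case (sketch):
 *
 *	before: | l2 | l3 | payload |
 *	after:  | l2 | l3 | esp hdr | iv | payload | pad | esp tail | icv |
 *
 * insert_esph() slides the original L2/L3 headers into the newly
 * prepended area, opening an hlen byte hole for the ESP header and IV;
 * update_trs_l3hdr() rewrites the L3 next-protocol to ESP and returns
 * the original value, which is preserved in the trailer's next_proto.
 */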
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv);

		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
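/*
 * Example (illustrative): for IPv4 over untagged Ethernet the caller
 * is expected to set mb->l2_len = 14 and mb->l3_len = 20 (plus IPv4
 * options, if any), so outb_cop_prepare() above receives hlen == 34
 * and the crypto regions start right after the preserved headers.
 */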
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			k++;
		} else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}
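/*
 * remove_sqh() semantics (sketch; the helper itself lives in the
 * library's internal headers): the digest was computed with sqn.hi
 * occupying the 4 bytes in front of the final ICV location, so the
 * ICV words are shifted one 32-bit slot toward the packet head,
 * overwriting the temporary sqn.hi and restoring the wire format.
 */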
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
}
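/*
 * Illustrative setup (assumed application-side code, not from this
 * file): security.ol_flags is expected to be seeded from the device
 * security capabilities, e.g.:
 *
 *	cap = rte_security_capability_get(ctx, &idx);
 *	ss->security.ol_flags = cap->ol_flags;
 *
 * so that metadata is attached only for PMDs that request it via
 * RTE_SECURITY_TX_OLOAD_NEED_MDATA.
 */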
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
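/*
 * Usage sketch (illustrative): for inline-crypto sessions this
 * function backs rte_ipsec_pkt_process(); prepared packets then go
 * straight to the inline-capable port:
 *
 *	k = rte_ipsec_pkt_process(ss, mb, num);
 *	k = rte_eth_tx_burst(port_id, queue_id, mb, k);
 *
 * and the NIC performs the actual encryption on transmit.
 */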
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
			l2, l3, &icv);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
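/*
 * Caller contract (sketch): the transport inline path relies on the
 * application having parsed the headers beforehand, e.g.:
 *
 *	mb->l2_len = sizeof(struct ether_hdr);
 *	mb->l3_len = sizeof(struct ipv4_hdr);
 *
 * before invoking rte_ipsec_pkt_process().
 */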
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}
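/*
 * Sketch (illustrative): an inline-proto session is created with
 * action_type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL in its
 * rte_security_session_conf; the device builds the ESP header and
 * trailer and encrypts on transmit, so no packet rewrite is needed
 * here beyond flags and metadata.
 */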