/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"
/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}
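/*
 * Note: sa->ctp holds offset/length deltas precomputed for the SA, so only
 * the per-packet protected-data offset (pofs) and payload length (plen)
 * need to be added here; no per-packet branching is required.
 */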
/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}
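/*
 * For AEAD the digest (ICV) lives in the packet tail and the AAD block is
 * placed directly behind it (see outb_pkt_xprepare() below), which is why
 * both are addressed relative to icv->va/icv->pa here.
 */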
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;

	/* fill sym op fields */
	sop = cop->sym;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
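/*
 * The cipher/auth (or aead) offsets and lengths come from the SA template
 * via the helpers above; the per-packet IV material (GCM nonce, AES-CTR
 * counter block) is written into the crypto op private area at sa->iv_ofs
 * for the algorithms that need it.
 */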
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
			sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
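/*
 * After outb_tun_pkt_prepare() the mbuf holds:
 *   tunnel header | ESP header | IV | original IP packet | padding | ESP tail
 * with icv->va/icv->pa pointing at the reserved ICV location in the last
 * segment; the ICV itself (and the optional SQN.hi/AAD data) is written
 * later, see outb_pkt_xprepare() and the crypto processing.
 */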
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}
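/*
 * When ESN is used with an auth algorithm that must cover SQN.hi, the high
 * 32 bits of the sequence number are appended here only for digest
 * computation; esp_outb_sqh_process() below moves the ICV back over them
 * once the crypto operation completes.
 */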
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
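/*
 * Note the "death-row" pattern used above: indexes of packets that failed
 * preparation are collected in dr[], then move_bad_mbufs() shifts them past
 * the successfully prepared ones, so the caller can treat mb[0..k) as good
 * and mb[k..n) as failed (rte_errno holds the last error).
 */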
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
			IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
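/*
 * In transport mode the ESP header is inserted between the original L3
 * header and its payload: insert_esph() moves the L2/L3 headers into the
 * newly prepended area, and update_trs_l3hdr() rewrites the IP
 * next-protocol field to ESP while returning the original protocol, which
 * becomes the ESP trailer's next_proto.
 */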
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
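/*
 * Apart from reading the per-packet L2/L3 header lengths and passing
 * l2 + l3 as the crypto data offset, the flow here is identical to
 * esp_outb_tun_prepare() above.
 */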
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			k++;
		} else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}
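/*
 * Intended to run after crypto completion for sessions whose SA has
 * sqh_len != 0: the digest was computed over the trailing SQN.hi word, and
 * remove_sqh() copies the ICV back over it so the packet ends with
 * padding | ESP tail | ICV as required on the wire.
 */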
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
}
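/*
 * rte_security_set_pkt_metadata() is called only when the security PMD
 * advertises RTE_SECURITY_TX_OLOAD_NEED_MDATA in its ol_flags; otherwise
 * setting PKT_TX_SEC_OFFLOAD on the mbuf is sufficient.
 */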
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
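/*
 * Unlike the lookaside path there are no crypto ops to build here: the
 * packet is fully formed in software and the inline-crypto capable NIC
 * performs the actual encryption and ICV generation on transmit.
 */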
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
			l2, l3, &icv);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}
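/*
 * Illustrative lookaside-none usage sketch (not part of this file),
 * assuming the application already created the rte_ipsec_session and a
 * cryptodev queue pair; the public entry points
 * rte_ipsec_pkt_crypto_prepare() and rte_ipsec_pkt_process() dispatch to
 * the prepare/process routines above:
 *
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, k);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, RTE_DIM(cop));
 *	... gather mbufs from the completed ops ...
 *	n = rte_ipsec_pkt_process(ss, mb, n);
 */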