1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2020 Intel Corporation
9 #include <rte_cryptodev.h>
12 #include "ipsec_sqn.h"
18 typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
19 struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
23 * helper function to fill crypto_sym op for cipher+auth algorithms.
24 * used by inb_cop_prepare(), see below.
27 sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
28 const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
29 uint32_t pofs, uint32_t plen)
31 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
32 sop->cipher.data.length = plen - sa->ctp.cipher.length;
33 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
34 sop->auth.data.length = plen - sa->ctp.auth.length;
35 sop->auth.digest.data = icv->va;
36 sop->auth.digest.phys_addr = icv->pa;
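/*
 * Worked example (hypothetical numbers, not taken from the SA setup code):
 * assume an IPv4 transport packet with pofs = 14 + 20 (L2 + L3 headers) and
 * an AES-CBC/HMAC-SHA1-96 SA (8B ESP header, 16B IV, 12B ICV) whose ctp
 * values were initialized as:
 *   ctp.cipher.offset = 8 + 16 = 24;  ctp.cipher.length = 8 + 16 + 12 = 36;
 *   ctp.auth.offset   = 0;            ctp.auth.length   = 12;
 * Then for plen = 100 the assignments above yield a cipher region of
 * [pofs + 24, pofs + 88) and an auth region of [pofs + 0, pofs + 88),
 * i.e. everything from the ESP header up to (but not including) the ICV.
 */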
40 * helper function to fill crypto_sym op for aead algorithms
41 * used by inb_cop_prepare(), see below.
44 sop_aead_prepare(struct rte_crypto_sym_op *sop,
45 const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
46 uint32_t pofs, uint32_t plen)
48 sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
49 sop->aead.data.length = plen - sa->ctp.cipher.length;
50 sop->aead.digest.data = icv->va;
51 sop->aead.digest.phys_addr = icv->pa;
52 sop->aead.aad.data = icv->va + sa->icv_len;
53 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
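/*
 * For reference (per RFC 4106; the exact layout wording here is a note, not
 * taken from this file): the digest pointer references the ICV area found in
 * the packet, while the AAD used for AES-GCM is a small buffer that the
 * prepare stage writes right after the ICV (see inb_pkt_xprepare() below).
 * The 12-byte GCM nonce is built separately from the 4-byte salt stored in
 * the SA plus the 8-byte IV carried in the ESP payload, e.g.:
 *
 *   nonce = salt (4B, from key material) || esp_iv (8B, from the packet)
 */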
57 * setup crypto op and crypto sym op for ESP inbound packet.
60 inb_cop_prepare(struct rte_crypto_op *cop,
61 const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
62 const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
64 struct rte_crypto_sym_op *sop;
65 struct aead_gcm_iv *gcm;
66 struct aesctr_cnt_blk *ctr;
71 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
72 pofs + sizeof(struct rte_esp_hdr));
74 /* fill sym op fields */
78 case ALGO_TYPE_AES_GCM:
79 sop_aead_prepare(sop, sa, icv, pofs, plen);
81 /* fill AAD IV (located inside crypto op) */
82 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
84 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
86 case ALGO_TYPE_AES_CBC:
87 case ALGO_TYPE_3DES_CBC:
88 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
90 /* copy iv from the input packet to the cop */
91 ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
92 copy_iv(ivc, ivp, sa->iv_len);
94 case ALGO_TYPE_AES_CTR:
95 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
97 /* fill CTR block (located inside crypto op) */
98 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
100 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
103 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
108 static inline uint32_t
109 inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
110 uint32_t *pofs, uint32_t plen, void *iv)
112 struct aead_gcm_iv *gcm;
113 struct aesctr_cnt_blk *ctr;
117 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
118 *pofs + sizeof(struct rte_esp_hdr));
121 switch (sa->algo_type) {
122 case ALGO_TYPE_AES_GCM:
123 gcm = (struct aead_gcm_iv *)iv;
124 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
126 case ALGO_TYPE_AES_CBC:
127 case ALGO_TYPE_3DES_CBC:
128 copy_iv(iv, ivp, sa->iv_len);
130 case ALGO_TYPE_AES_CTR:
131 ctr = (struct aesctr_cnt_blk *)iv;
132 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
136 *pofs += sa->ctp.auth.offset;
137 clen = plen - sa->ctp.auth.length;
142 * Helper function for prepare() to deal with the situation when the
143 * ICV is spread across two segments. Tries to move the ICV completely into the
146 static struct rte_mbuf *
147 move_icv(struct rte_mbuf *ml, uint32_t ofs)
155 n = ml->data_len - ofs;
157 prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
158 new = rte_pktmbuf_prepend(ms, n);
162 /* move n ICV bytes from ml into ms */
163 rte_memcpy(new, prev, n);
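/*
 * Illustration (a sketch; "ms" is assumed to be the segment following "ml"):
 *
 *  before:  ml: [ ... payload | first n bytes of ICV ]
 *           ms: [ remaining ICV bytes | ... ]
 *
 *  after:   ml: [ ... payload ]                  (data_len reduced by n)
 *           ms: [ full, contiguous ICV | ... ]   (n bytes prepended)
 *
 * This keeps the ICV (and the AAD/SQN data written after it) in one
 * contiguous buffer, which is required since the digest/AAD are passed to
 * the crypto device as single va/pa pointers.
 */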
170 * for pure cryptodev (lookaside none), depending on SA settings,
171 * we might have to write some extra data to the packet.
174 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
175 const union sym_op_data *icv)
177 struct aead_gcm_aad *aad;
179 /* insert SQN.hi between ESP trailer and ICV */
180 if (sa->sqh_len != 0)
181 insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
184 * fill AAD fields, if any (aad fields are placed after icv),
185 * right now we support only one AEAD algorithm: AES-GCM.
187 if (sa->aad_len != 0) {
188 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
189 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
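/*
 * For AES-GCM the AAD written here is expected to follow RFC 4106:
 * the SPI plus the 32-bit SQN for ordinary sequence numbers, or the SPI plus
 * the full 64-bit ESN when IS_ESN(sa) is true, e.g. (layouts illustrative):
 *
 *   aad = { spi, sqn_low }            - 8 bytes, no ESN
 *   aad = { spi, sqn_hi, sqn_low }    - 12 bytes, with ESN
 */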
194 inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
195 struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
199 struct rte_esp_hdr *esph;
201 esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
204 * retrieve and reconstruct the SQN, convert it back into network
205 * byte order, then check it against the replay window.
207 sqn = rte_be_to_cpu_32(esph->seq);
209 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
210 *sqc = rte_cpu_to_be_64(sqn);
212 /* check IPsec window */
213 rc = esn_inb_check_sqn(rsn, sa, sqn);
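/*
 * The reconstruction above is expected to follow the RFC 4303 Appendix A
 * heuristic.  A minimal sketch of that heuristic (not the actual
 * reconstruct_esn() from ipsec_sqn.h):
 *
 *   static uint64_t
 *   reconstruct_esn_sketch(uint64_t top, uint32_t sqn, uint32_t w)
 *   {
 *       uint32_t th = top >> 32, tl = (uint32_t)top, sh;
 *
 *       if (tl >= w - 1)
 *           sh = (sqn >= tl - (w - 1)) ? th : th + 1;
 *       else
 *           sh = (sqn >= tl - (w - 1)) ? th - 1 : th;
 *       return ((uint64_t)sh << 32) | sqn;
 *   }
 *
 * e.g. with top = 0x100000050 and w = 64: sqn 0x60 maps to 0x100000060,
 * while sqn 0x05 (below the window) maps to 0x200000005.
 */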
218 /* prepare packet for upcoming processing */
219 static inline int32_t
220 inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
221 uint32_t hlen, union sym_op_data *icv)
223 uint32_t clen, icv_len, icv_ofs, plen;
226 /* start packet manipulation */
230 /* check that packet has a valid length */
231 clen = plen - sa->ctp.cipher.length;
232 if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
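/*
 * Note: the check above relies on sa->pad_align being a power of two, so
 * (clen & (pad_align - 1)) == 0 is equivalent to clen % pad_align == 0.
 * E.g. (hypothetical values) with pad_align = 16, clen = 80 is accepted
 * while clen = 72 is rejected.
 */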
235 /* find ICV location */
236 icv_len = sa->icv_len;
237 icv_ofs = mb->pkt_len - icv_len;
239 ml = mbuf_get_seg_ofs(mb, &icv_ofs);
242 * if ICV is spread by two segments, then try to
243 * move ICV completely into the last segment.
245 if (ml->data_len < icv_ofs + icv_len) {
247 ml = move_icv(ml, icv_ofs);
251 /* new ICV location */
255 icv_ofs += sa->sqh_len;
258 * we have to allocate space for the AAD somewhere;
259 * for now, just use the free trailing space of the last segment.
260 * It would probably be more convenient to reserve space for the AAD
261 * inside rte_crypto_op itself
262 * (as is already done for the IV, which has space reserved inside the cop).
264 if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
267 icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
268 icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
271 * if ESN is used, the high-order 32 bits of the SQN are also used in the
272 * ICV calculation but are not transmitted. Update the packet length to be
273 * consistent with the auth data length and offset; this extra length will
274 * be subtracted from the packet length in post-crypto processing.
276 mb->pkt_len += sa->sqh_len;
277 ml->data_len += sa->sqh_len;
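/*
 * After this preparation the tail of the packet looks roughly like this
 * (illustrative layout; AAD is present only for AEAD, sqn_hi only for ESN):
 *
 *   ... payload | padding | pad_len | next_hdr | sqn_hi | ICV | AAD (tailroom)
 *
 * sqn_hi (the high-order 32 bits of the ESN) is never transmitted, it is only
 * covered by the ICV calculation; pkt_len/data_len are bumped by sqh_len here
 * so that the auth length derived from them includes those bytes, and the
 * extra length is removed again in post-crypto processing.
 */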
282 static inline int32_t
283 inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
284 struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
289 rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
293 rc = inb_prepare(sa, mb, hlen, icv);
297 inb_pkt_xprepare(sa, sqn, icv);
302 * setup/update packets and crypto ops for ESP inbound case.
305 esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
306 struct rte_crypto_op *cop[], uint16_t num)
310 struct rte_ipsec_sa *sa;
311 struct rte_cryptodev_sym_session *cs;
312 struct replay_sqn *rsn;
313 union sym_op_data icv;
318 rsn = rsn_acquire(sa);
321 for (i = 0; i != num; i++) {
323 hl = mb[i]->l2_len + mb[i]->l3_len;
324 rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
326 lksd_none_cop_prepare(cop[k], cs, mb[i]);
327 inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
335 rsn_release(sa, rsn);
337 /* move mbufs that were not prepared beyond the good ones */
338 if (k != num && k != 0)
339 move_bad_mbufs(mb, dr, num, num - k);
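/*
 * Typical application-side usage of this prepare path, a rough sketch with
 * error handling omitted; BURST, nb_rx, cop_pool, dev_id, qp_id, mb and ss
 * are placeholders for the application's own objects:
 *
 *   struct rte_crypto_op *cop[BURST];
 *   struct rte_mbuf *pkt[BURST];
 *   uint16_t n, i;
 *
 *   n = rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *           cop, nb_rx);
 *   n = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, n);
 *   n = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, n);
 *   ...
 *   n = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, BURST);
 *   for (i = 0; i != n; i++)
 *       pkt[i] = cop[i]->sym->m_src;
 *   n = rte_ipsec_pkt_process(ss, pkt, n);
 */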
345 * First step of inbound packet processing.
346 * This is the common part for both tunnel and transport mode.
347 * Extract information that will be needed later from mbuf metadata and
348 * actual packet data:
349 * - mbuf for packet's last segment
350 * - length of the L2/L3 headers
351 * - esp tail structure
354 process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
355 struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
357 const struct rte_esp_tail *pt;
360 ofs = mb->pkt_len - tlen;
361 hlen[0] = mb->l2_len + mb->l3_len;
362 ml[0] = mbuf_get_seg_ofs(mb, &ofs);
363 pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
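/*
 * For reference, the trailer read above is the fixed 2-byte ESP trailer
 * (fields as defined in rte_esp.h, reproduced here for convenience):
 *
 *   struct rte_esp_tail {
 *       uint8_t pad_len;      - number of padding bytes preceding it
 *       uint8_t next_proto;   - IP protocol number of the protected payload
 *   };
 *
 * tlen = icv_len + sizeof(tail) + sqh_len, so "ofs" computed above lands
 * exactly on pad_len.
 */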
369 * Helper function to check padding byte values.
370 * Note that pad bytes can be spread across multiple segments.
373 check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
378 for (n = 0; n != len; n += k, mb = mb->next) {
379 k = mb->data_len - ofs;
380 k = RTE_MIN(k, len - n);
381 pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
382 if (memcmp(pd, esp_pad_bytes + n, k) != 0)
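/*
 * The reference pattern is the self-describing padding mandated by RFC 4303:
 * 1, 2, 3, ...; esp_pad_bytes is expected to be a constant array holding that
 * sequence. E.g. for pad_len = 4 the bytes right before the trailer must be
 * 01 02 03 04.
 */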
391 * packet checks for transport mode:
392 * - no reported IPsec related failures in ol_flags
393 * - tail and header lengths are valid
394 * - padding bytes are valid
395 * Apart from the checks, the function also updates the tail offset
396 * (and segment), taking the pad length into account.
398 static inline int32_t
399 trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
400 uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
402 if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
403 tlen + hlen > mb->pkt_len)
406 /* padding bytes are spread over multiple segments */
407 if (tofs[0] < espt.pad_len) {
408 tofs[0] = mb->pkt_len - tlen;
409 ml[0] = mbuf_get_seg_ofs(mb, tofs);
411 tofs[0] -= espt.pad_len;
413 return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
417 * packet checks for tunnel mode:
418 * - same as for transport mode
419 * - esp tail next_proto matches the value expected for that SA
421 static inline int32_t
422 tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
423 uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
426 return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
427 espt.next_proto != proto);
431 * step two for tunnel mode:
432 * - read SQN value (for future use)
433 * - cut off ICV, ESP tail and padding bytes
434 * - cut off ESP header and IV, and if needed also the L2/L3 headers
435 * (controlled by *adj* value)
438 tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
439 uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
441 const struct rte_esp_hdr *ph;
444 ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
447 /* cut off ICV, ESP tail and padding bytes */
448 mbuf_cut_seg_ofs(mb, ml, tofs, tlen);
450 /* cut off L2/L3 headers, ESP header and IV */
451 return rte_pktmbuf_adj(mb, adj);
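/*
 * Example (illustrative numbers): for an IPv4-in-IPv4 tunnel with a 14B
 * Ethernet header, a 20B outer IP header, an 8B ESP header and a 16B IV,
 * the caller is expected to pass adj = hlen + cipher offset = 34 + 24 = 58,
 * so the rte_pktmbuf_adj() call above leaves the decrypted inner IP packet
 * at the start of the mbuf, while mbuf_cut_seg_ofs() already removed the
 * trailer/ICV bytes from the end.
 */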
455 * step two for transport mode:
456 * - read SQN value (for future use)
457 * - cut off ICV, ESP tail and padding bytes
458 * - cut off ESP header and IV
459 * - move L2/L3 header to fill the gap after ESP header removal
462 trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
463 uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
467 /* get start of the packet before modifications */
468 op = rte_pktmbuf_mtod(mb, char *);
470 /* cut off ESP header and IV */
471 np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);
473 /* move header bytes to fill the gap after ESP header removal */
474 remove_esph(np, op, hlen);
479 * step three for transport mode:
480 * update mbuf metadata:
485 trs_process_step3(struct rte_mbuf *mb)
487 /* reset mbuf packet type */
488 mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
490 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
491 mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
495 * step three for tunnel mode:
496 * update mbuf metadata:
502 tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
504 /* reset mbuf metadata: L2/L3 len, packet type */
505 mb->packet_type = RTE_PTYPE_UNKNOWN;
506 mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
508 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
509 mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
513 * *process* function for tunnel packets
515 static inline uint16_t
516 tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
517 uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
519 uint32_t adj, i, k, tl;
520 uint32_t hl[num], to[num];
521 struct rte_esp_tail espt[num];
522 struct rte_mbuf *ml[num];
527 * remove icv, esp trailer and high-order
528 * 32 bits of esn from packet length
530 const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
531 const uint32_t cofs = sa->ctp.cipher.offset;
534 * to minimize stalls due to load latency,
535 * read the mbuf metadata and esp tails first.
537 for (i = 0; i != num; i++)
538 process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
541 for (i = 0; i != num; i++) {
544 tl = tlen + espt[i].pad_len;
546 /* check that packet is valid */
547 if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
550 outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
553 /* modify packet's layout */
554 inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
557 /* update inner ip header */
558 update_tun_inb_l3hdr(sa, outh, inh);
560 /* update mbuf's metadata */
561 tun_process_step3(mb[i], sa->tx_offload.msk,
572 * *process* function for transport packets
574 static inline uint16_t
575 trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
576 uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
579 uint32_t i, k, l2, tl;
580 uint32_t hl[num], to[num];
581 struct rte_esp_tail espt[num];
582 struct rte_mbuf *ml[num];
585 * remove icv, esp trailer and high-order
586 * 32 bits of esn from packet length
588 const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
589 const uint32_t cofs = sa->ctp.cipher.offset;
592 * to minimize stalls due to load latency,
593 * read the mbuf metadata and esp tails first.
595 for (i = 0; i != num; i++)
596 process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
599 for (i = 0; i != num; i++) {
601 tl = tlen + espt[i].pad_len;
604 /* check that packet is valid */
605 if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
606 hl[i] + cofs, tl) == 0) {
608 /* modify packet's layout */
609 np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
611 update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
612 l2, hl[i] - l2, espt[i].next_proto);
614 /* update mbuf's metadata */
615 trs_process_step3(mb[i]);
625 * for a group of ESP inbound packets, perform SQN check and update.
627 static inline uint16_t
628 esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
629 uint32_t dr[], uint16_t num)
632 struct replay_sqn *rsn;
634 /* replay not enabled */
635 if (sa->replay.win_sz == 0)
638 rsn = rsn_update_start(sa);
641 for (i = 0; i != num; i++) {
642 if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
648 rsn_update_finish(sa, rsn);
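/*
 * The window update itself (esn_inb_update_sqn() from ipsec_sqn.h) is the
 * classic bitmap-based anti-replay algorithm.  A minimal sketch of the
 * technique for window sizes up to 64 (not the actual implementation,
 * which also handles locking and larger multi-word bitmaps):
 *
 *   struct replay_win_sketch {
 *       uint64_t top;    - highest ESN accepted so far
 *       uint64_t bmp;    - bit i set => (top - i) already seen
 *   };
 *
 *   static int
 *   win_update_sketch(struct replay_win_sketch *w, uint64_t sqn, uint32_t wsz)
 *   {
 *       if (sqn > w->top) {
 *           uint64_t d = sqn - w->top;
 *           w->bmp = (d < 64) ? (w->bmp << d) | 1 : 1;
 *           w->top = sqn;
 *           return 0;
 *       }
 *       if (w->top - sqn >= wsz)
 *           return -1;
 *       if (w->bmp & (1ULL << (w->top - sqn)))
 *           return -1;
 *       w->bmp |= 1ULL << (w->top - sqn);
 *       return 0;
 *   }
 */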
653 * process group of ESP inbound packets.
655 static inline uint16_t
656 esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
657 uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
663 /* process packets, extract seq numbers */
664 k = process(sa, mb, sqn, dr, num, sqh_len);
666 /* handle unprocessed mbufs */
667 if (k != num && k != 0)
668 move_bad_mbufs(mb, dr, num, num - k);
670 /* update SQN and replay window */
671 n = esp_inb_rsn_update(sa, sqn, dr, k);
673 /* handle mbufs with wrong SQN */
674 if (n != k && n != 0)
675 move_bad_mbufs(mb, dr, k, k - n);
684 * Prepare routine for inbound CPU-CRYPTO (synchronous mode);
685 * it also performs the actual crypto/auth processing.
688 cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
689 struct rte_mbuf *mb[], uint16_t num)
693 struct rte_ipsec_sa *sa;
694 struct replay_sqn *rsn;
695 union sym_op_data icv;
702 uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
707 rsn = rsn_acquire(sa);
709 /* do preparation for all packets */
710 for (i = 0, k = 0; i != num; i++) {
712 /* calculate ESP header offset */
713 l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
715 /* prepare ESP packet for processing */
716 rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
718 /* get encrypted data offset and length */
719 clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
720 l4ofs + k, rc, ivbuf[k]);
722 /* fill iv, digest and aad */
724 aad[k] = icv.va + sa->icv_len;
732 /* release rsn lock */
733 rsn_release(sa, rsn);
735 /* move mbufs that were not prepared beyond the good ones */
736 if (k != num && k != 0)
737 move_bad_mbufs(mb, dr, num, num - k);
739 /* convert mbufs to iovecs and do actual crypto/auth processing */
741 cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
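/*
 * For the synchronous path the heavy lifting is expected to be done by the
 * cryptodev CPU-crypto API (rte_cryptodev_sym_cpu_crypto_process() operating
 * on an rte_crypto_sym_vec built from the iov/iv/aad/digest arrays above).
 * From the application's point of view no enqueue/dequeue is involved; a
 * rough sketch (API names as declared in rte_ipsec.h) is simply:
 *
 *   k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);
 *   k = rte_ipsec_pkt_process(ss, mb, k);
 */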
747 * process group of ESP inbound tunnel packets.
750 esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
751 struct rte_mbuf *mb[], uint16_t num)
753 struct rte_ipsec_sa *sa = ss->sa;
755 return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
759 inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
760 struct rte_mbuf *mb[], uint16_t num)
762 return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
766 * process group of ESP inbound transport packets.
769 esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
770 struct rte_mbuf *mb[], uint16_t num)
772 struct rte_ipsec_sa *sa = ss->sa;
774 return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
778 inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
779 struct rte_mbuf *mb[], uint16_t num)
781 return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);