/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
#include <rte_cryptodev.h>
#include "ipsec_sqn.h"
typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
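/*
 * Note: tun_process() and trs_process() below match this prototype and do
 * the actual post-crypto transformation; they are plugged into the generic
 * esp_inb_pkt_process() helper, roughly as in this sketch (illustrative,
 * not literal code from this file):
 *
 *	uint32_t sqn[num], dr[num];
 *	uint16_t k = process(sa, mb, sqn, dr, num);
 */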
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by inb_cop_prepare(), see below.
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
	sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
	sop->cipher.data.length = plen - sa->ctp.cipher.length;
	sop->auth.data.offset = pofs + sa->ctp.auth.offset;
	sop->auth.data.length = plen - sa->ctp.auth.length;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
 * helper function to fill crypto_sym op for aead algorithms
 * used by inb_cop_prepare(), see below.
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
	sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
	sop->aead.data.length = plen - sa->ctp.cipher.length;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
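/*
 * Illustrative sketch of the offsets computed above, with hypothetical
 * numbers (the real ctp values are set at SA initialization, not shown
 * here): assume pofs == 34 (14B L2 + 20B IPv4), an 8B ESP header, an 8B
 * IV, a 16B ICV, ctp.cipher.offset == 16 and ctp.cipher.length == 32.
 * For a 64B ciphertext, plen == 8 + 8 + 64 + 16 == 96 and:
 *
 *	aead.data.offset == 34 + 16 == 50  (start of the ciphertext)
 *	aead.data.length == 96 - 32 == 64  (payload + padding + ESP tail)
 *
 * while the digest points at the ICV and the AAD is staged right after it.
 */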
 * setup crypto op and crypto sym op for ESP inbound packet.
inb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
		pofs + sizeof(struct rte_esp_hdr));
	/* fill sym op fields */
	case ALGO_TYPE_AES_GCM:
		sop_aead_prepare(sop, sa, icv, pofs, plen);
		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		copy_iv(ivc, ivp, sa->iv_len);
	case ALGO_TYPE_AES_CTR:
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
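/*
 * A note on the per-algorithm IV handling above: ivp points right past the
 * ESP header, so the IV always comes from the packet itself.  For AES-GCM
 * the 12-byte nonce of RFC 4106 is the 4-byte salt kept in the SA followed
 * by the 8-byte IV from the packet, which is what aead_gcm_iv_fill() is
 * expected to assemble, conceptually:
 *
 *	nonce = salt (4B, from SA) || ivp[0] (8B, from packet)
 */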
 * Helper function for prepare() to deal with the situation when the
 * ICV is spread across two segments. Tries to move ICV completely into
 * the last segment.
static struct rte_mbuf *
move_icv(struct rte_mbuf *ml, uint32_t ofs)
	n = ml->data_len - ofs;
	prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
	new = rte_pktmbuf_prepend(ms, n);
	/* move n ICV bytes from ml into ms */
	rte_memcpy(new, prev, n);
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data into the packet.
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
	struct aead_gcm_aad *aad;
	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
	 * fill AAD fields, if any (AAD fields are placed right after the ICV);
	 * right now we support only one AEAD algorithm: AES-GCM.
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
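/*
 * Sketch of the data staged past the payload by the function above (layout
 * inferred from the offsets used here, field widths illustrative):
 *
 *	... payload | padding | ESP tail | SQN.hi (sqh_len) | ICV | AAD (aad_len)
 *
 * SQN.hi and the AAD exist only for ICV computation by the crypto device;
 * they are not part of the packet handed back after processing.
 */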
 * setup/update packet data and metadata for ESP inbound tunnel case.
static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
	uint32_t clen, icv_len, icv_ofs, plen;
	struct rte_esp_hdr *esph;
	esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
	 * retrieve and reconstruct SQN, then check it, then
	 * convert it back into network byte order.
	sqn = rte_be_to_cpu_32(esph->seq);
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
	rc = esn_inb_check_sqn(rsn, sa, sqn);
	sqn = rte_cpu_to_be_64(sqn);
	/* start packet manipulation */
	/* check that packet has a valid length */
	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
	/* find ICV location */
	icv_len = sa->icv_len;
	icv_ofs = mb->pkt_len - icv_len;
	ml = mbuf_get_seg_ofs(mb, &icv_ofs);
	 * if ICV is spread across two segments, then try to
	 * move ICV completely into the last segment.
	if (ml->data_len < icv_ofs + icv_len) {
		ml = move_icv(ml, icv_ofs);
		/* new ICV location */
	icv_ofs += sa->sqh_len;
	 * we have to allocate space for AAD somewhere;
	 * right now just use free trailing space in the last segment.
	 * It would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (as is already done for the IV, which lives inside the cop).
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
	inb_pkt_xprepare(sa, sqn, icv);
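/*
 * Worked example for the length check above (numbers are made up): with
 * hlen == 34, pkt_len == 154 and ctp.cipher.length == 32 we get plen == 120
 * and clen == 88; for pad_align == 4 the check passes since 88 & 3 == 0,
 * while a packet one byte shorter would give clen == 87 and fail the check.
 */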
 * setup/update packets and crypto ops for ESP inbound case.
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	rsn = rsn_acquire(sa);
	for (i = 0; i != num; i++) {
		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
	rsn_release(sa, rsn);
	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0) {
		move_bad_mbufs(mb, dr, num, num - k);
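/*
 * Minimal usage sketch of the lookaside-none prepare path that typically
 * reaches the function above via rte_ipsec_pkt_crypto_prepare().  BURST,
 * cop_pool, dev_id and qid are the application's own; error handling is
 * omitted:
 *
 *	struct rte_crypto_op *cop[BURST];
 *	uint16_t n, k;
 *
 *	n = rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *		cop, num);
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
 */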
 * Start with processing of an inbound packet.
 * This is the common part for both tunnel and transport mode.
 * Extract information that will be needed later from mbuf metadata and
 * actual packet data:
 * - mbuf for packet's last segment
 * - length of the L2/L3 headers
 * - esp tail structure
process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
	struct esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
	const struct esp_tail *pt;
	ofs = mb->pkt_len - tlen;
	hlen[0] = mb->l2_len + mb->l3_len;
	ml[0] = mbuf_get_seg_ofs(mb, &ofs);
	pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *, ofs);
 * Helper function to check pad byte values.
 * Note that pad bytes can be spread across multiple segments.
check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
	for (n = 0; n != len; n += k, mb = mb->next) {
		k = mb->data_len - ofs;
		k = RTE_MIN(k, len - n);
		pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
		if (memcmp(pd, esp_pad_bytes + n, k) != 0)
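/*
 * For reference: RFC 4303 defines the default self-describing padding as
 * the byte sequence 1, 2, 3, ..., so esp_pad_bytes is expected to hold that
 * pattern and, e.g., a packet with pad_len == 4 must carry:
 *
 *	pad[4] == {1, 2, 3, 4}
 *
 * Any other content makes the memcmp() above fail and the packet is later
 * treated as bad.
 */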
 * packet checks for transport mode:
 * - no reported IPsec related failures in ol_flags
 * - tail and header lengths are valid
 * - padding bytes are valid
 * Apart from the checks, the function also updates the tail offset
 * (and segment) to account for the pad length.
static inline int32_t
trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct esp_tail espt, uint32_t hlen, uint32_t tlen)
	if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
			tlen + hlen > mb->pkt_len)
	/* padding bytes are spread over multiple segments */
	if (tofs[0] < espt.pad_len) {
		tofs[0] = mb->pkt_len - tlen;
		ml[0] = mbuf_get_seg_ofs(mb, tofs);
		tofs[0] -= espt.pad_len;
	return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
 * packet checks for tunnel mode:
 * - same as for transport mode
 * - ESP tail next_proto contains the value expected for that SA
static inline int32_t
tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
	uint32_t *tofs, struct esp_tail espt, uint32_t hlen, uint32_t tlen,
	return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
		espt.next_proto != proto);
 * step two for tunnel mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV, also if needed - L2/L3 headers
 *   (controlled by *adj* value)
tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
	const struct rte_esp_hdr *ph;
	ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
	/* cut off ICV, ESP tail and padding bytes */
	mbuf_cut_seg_ofs(mb, ml, tofs, tlen);
	/* cut off L2/L3 headers, ESP header and IV */
	return rte_pktmbuf_adj(mb, adj);
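/*
 * Hedged example of the result: for an ESP tunnel over Ethernet/IPv4 the
 * caller passes adj == hlen + ctp.cipher.offset, which normally equals
 * l2_len + l3_len + sizeof(struct rte_esp_hdr) + IV size, so after the two
 * cuts the mbuf holds just the decrypted inner packet, starting at its own
 * IP header.
 */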
 * step two for transport mode:
 * - read SQN value (for future use)
 * - cut off ICV, ESP tail and padding bytes
 * - cut off ESP header and IV
 * - move L2/L3 header to fill the gap after ESP header removal
trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
	uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
	/* get start of the packet before modifications */
	op = rte_pktmbuf_mtod(mb, char *);
	/* cut off ESP header and IV */
	np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);
	/* move header bytes to fill the gap after ESP header removal */
	remove_esph(np, op, hlen);
 * step three for transport mode:
 * update mbuf metadata:
trs_process_step3(struct rte_mbuf *mb)
	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
 * step three for tunnel mode:
 * update mbuf metadata:
tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
 * *process* function for tunnel packets
static inline uint16_t
tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint32_t sqn[], uint32_t dr[], uint16_t num)
	uint32_t adj, i, k, tl;
	uint32_t hl[num], to[num];
	struct esp_tail espt[num];
	struct rte_mbuf *ml[num];
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
	const uint32_t cofs = sa->ctp.cipher.offset;
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
	for (i = 0; i != num; i++) {
		tl = tlen + espt[i].pad_len;
		/* check that packet is valid */
		if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
			/* modify packet's layout */
			tun_process_step2(mb[i], ml[i], hl[i], adj, to[i],
			/* update mbuf's metadata */
			tun_process_step3(mb[i], sa->tx_offload.msk,
 * *process* function for transport packets
static inline uint16_t
trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
	uint32_t sqn[], uint32_t dr[], uint16_t num)
	uint32_t i, k, l2, tl;
	uint32_t hl[num], to[num];
	struct esp_tail espt[num];
	struct rte_mbuf *ml[num];
	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
	const uint32_t cofs = sa->ctp.cipher.offset;
	 * to minimize stalls due to load latency,
	 * read mbufs metadata and esp tail first.
	for (i = 0; i != num; i++)
		process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
	for (i = 0; i != num; i++) {
		tl = tlen + espt[i].pad_len;
		/* check that packet is valid */
		if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
				hl[i] + cofs, tl) == 0) {
			/* modify packet's layout */
			np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
			update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
				l2, hl[i] - l2, espt[i].next_proto);
			/* update mbuf's metadata */
			trs_process_step3(mb[i]);
 * for a group of ESP inbound packets perform SQN check and update.
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	uint32_t dr[], uint16_t num)
	struct replay_sqn *rsn;
	/* replay not enabled */
	if (sa->replay.win_sz == 0)
	rsn = rsn_update_start(sa);
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
	rsn_update_finish(sa, rsn);
 * process group of ESP inbound packets.
static inline uint16_t
esp_inb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
	struct rte_ipsec_sa *sa;
	/* process packets, extract seq numbers */
	k = process(sa, mb, sqn, dr, num);
	/* handle unprocessed mbufs */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);
	/* update SQN and replay window */
	n = esp_inb_rsn_update(sa, sqn, dr, k);
	/* handle mbufs with wrong SQN */
	if (n != k && n != 0)
		move_bad_mbufs(mb, dr, k, k - n);
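/*
 * Worked example of the dr[]/move_bad_mbufs() bookkeeping above (assuming
 * move_bad_mbufs() keeps good mbufs in front in their original order, as
 * its usage here suggests): with num == 5 and packets 1 and 3 failing,
 * process() returns k == 3 and dr == {1, 3}; after the first call mb[]
 * becomes {0, 2, 4, 1, 3}, so the caller sees 3 good packets first and the
 * 2 bad ones at the tail.
 */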
 * process group of ESP inbound tunnel packets.
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
	return esp_inb_pkt_process(ss, mb, num, tun_process);
 * process group of ESP inbound transport packets.
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
	return esp_inb_pkt_process(ss, mb, num, trs_process);
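/*
 * Minimal sketch of the receive side that ends in the two wrappers above
 * (single-SA case; dev_id/qid are the application's own, the crypto-op to
 * mbuf mapping is simplified and status checks are omitted):
 *
 *	struct rte_crypto_op *cop[BURST];
 *	struct rte_mbuf *pkt[BURST];
 *	uint16_t i, n;
 *
 *	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, BURST);
 *	for (i = 0; i != n; i++)
 *		pkt[i] = cop[i]->sym->m_src;
 *	n = rte_ipsec_pkt_process(ss, pkt, n);
 */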