1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
9 #include <rte_cryptodev.h>
12 #include "ipsec_sqn.h"
/*
 * Mode-specific (tunnel vs transport) inbound *process* handler.
 * On return, sqn[] holds the extracted sequence numbers of the good
 * packets and dr[] the indexes of the failed ones (see tun_process /
 * trs_process below and their use in esp_inb_pkt_process).
 * Returns the number of successfully processed packets.
 */
18 typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
19 struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
22 * setup crypto op and crypto sym op for ESP inbound packet.
/*
 * @cop   crypto op to fill (sym op, per-algo IV/AAD areas inside it)
 * @sa    SA the packet belongs to (offsets, lengths, salt, algo params)
 * @mb    input packet
 * @icv   va/pa of the ICV location in the packet (AAD is placed after it)
 * @pofs  offset of the ESP header within the packet
 * @plen  ESP payload length
 */
25 inb_cop_prepare(struct rte_crypto_op *cop,
26 const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
27 const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
29 struct rte_crypto_sym_op *sop;
30 struct aead_gcm_iv *gcm;
31 struct aesctr_cnt_blk *ctr;
37 /* fill sym op fields */
/* NOTE(review): the enclosing switch on the SA's algo type is not
 * visible in this view — only the case bodies below are. */
41 case ALGO_TYPE_AES_GCM:
/* AEAD: data region and digest; AAD lives right after the ICV
 * (same buffer, see inb_pkt_xprepare which fills it). */
42 sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
43 sop->aead.data.length = plen - sa->ctp.cipher.length;
44 sop->aead.digest.data = icv->va;
45 sop->aead.digest.phys_addr = icv->pa;
46 sop->aead.aad.data = icv->va + sa->icv_len;
47 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
49 /* fill AAD IV (located inside crypto op) */
/* GCM IV = per-packet ESP IV (8 bytes after the ESP header) + SA salt */
50 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
52 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
53 pofs + sizeof(struct esp_hdr));
54 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
56 case ALGO_TYPE_AES_CBC:
57 case ALGO_TYPE_3DES_CBC:
/* cipher+auth chain: separate cipher and auth regions, digest at ICV */
58 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
59 sop->cipher.data.length = plen - sa->ctp.cipher.length;
60 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
61 sop->auth.data.length = plen - sa->ctp.auth.length;
62 sop->auth.digest.data = icv->va;
63 sop->auth.digest.phys_addr = icv->pa;
65 /* copy iv from the input packet to the cop */
/* CBC uses the packet's ESP IV verbatim, placed at sa->iv_ofs in the cop */
66 ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
67 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
68 pofs + sizeof(struct esp_hdr));
69 copy_iv(ivc, ivp, sa->iv_len);
71 case ALGO_TYPE_AES_CTR:
72 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
73 sop->cipher.data.length = plen - sa->ctp.cipher.length;
74 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
75 sop->auth.data.length = plen - sa->ctp.auth.length;
76 sop->auth.digest.data = icv->va;
77 sop->auth.digest.phys_addr = icv->pa;
79 /* copy iv from the input packet to the cop */
/* CTR: build the counter block from the packet IV + SA salt */
80 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
82 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
83 pofs + sizeof(struct esp_hdr));
84 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
/* NOTE(review): presumably the NULL-algo/default case — the case
 * label itself is not visible in this view; confirm against the
 * full file. */
87 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
88 sop->cipher.data.length = plen - sa->ctp.cipher.length;
89 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
90 sop->auth.data.length = plen - sa->ctp.auth.length;
91 sop->auth.digest.data = icv->va;
92 sop->auth.digest.phys_addr = icv->pa;
98 * for pure cryptodev (lookaside none) depending on SA settings,
99 * we might have to write some extra data to the packet.
/*
 * @sqc is the reconstructed 64-bit SQN already converted to network
 * byte order by the caller (see inb_pkt_prepare).
 */
102 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
103 const union sym_op_data *icv)
105 struct aead_gcm_aad *aad;
107 /* insert SQN.hi between ESP trailer and ICV */
/* only needed for ESN (sqh_len != 0): the high 32 bits of the SQN
 * must be authenticated but are not carried in the packet */
108 if (sa->sqh_len != 0)
109 insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
112 * fill AAD fields, if any (aad fields are placed after icv),
113 * right now we support only one AEAD algorithm: AES-GCM.
115 if (sa->aad_len != 0) {
116 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
117 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
122 * setup/update packet data and metadata for ESP inbound tunnel case.
/*
 * Validates the packet's SQN against the replay window and computes the
 * ICV location (icv->va/pa) inside the last segment, leaving room for
 * the ESN high word and AAD after it.
 * NOTE(review): the computation of plen, the declarations of rc/sqn/ml
 * and the error-return statements are not visible in this view; the
 * caller (esp_inb_pkt_prepare) treats a non-negative return as the
 * payload length passed to inb_cop_prepare — confirm against full file.
 */
124 static inline int32_t
125 inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
126 struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
130 uint32_t clen, icv_ofs, plen;
132 struct esp_hdr *esph;
134 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
137 * retrieve and reconstruct SQN, then check it, then
138 * convert it back into network byte order.
140 sqn = rte_be_to_cpu_32(esph->seq);
142 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
144 rc = esn_inb_check_sqn(rsn, sa, sqn);
148 sqn = rte_cpu_to_be_64(sqn);
150 /* start packet manipulation */
154 ml = rte_pktmbuf_lastseg(mb);
/* ICV sits at the tail of the last segment; sqh_len accounts for the
 * ESN high word that will be inserted before it */
155 icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
157 /* check that packet has a valid length */
/* ciphertext length must be non-negative and a multiple of the
 * algorithm's pad alignment (pad_align is a power of two) */
158 clen = plen - sa->ctp.cipher.length;
159 if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
162 /* we have to allocate space for AAD somewhere,
163 * right now - just use free trailing space at the last segment.
164 * Would probably be more convenient to reserve space for AAD
165 * inside rte_crypto_op itself
166 * (again for IV space is already reserved inside cop).
168 if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
171 icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
172 icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
/* write ESN high word and/or AAD into the packet as required by the SA */
174 inb_pkt_xprepare(sa, sqn, icv);
179 * setup/update packets and crypto ops for ESP inbound case.
/*
 * Public entry point: prepares one crypto op per valid packet and moves
 * the failed mbufs beyond the good ones.
 * NOTE(review): the extraction of sa/cs from ss, the rc >= 0 check, the
 * k/dr bookkeeping for failed packets and the return statement are not
 * visible in this view — confirm against the full file.
 */
182 esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
183 struct rte_crypto_op *cop[], uint16_t num)
187 struct rte_ipsec_sa *sa;
188 struct rte_cryptodev_sym_session *cs;
189 struct replay_sqn *rsn;
190 union sym_op_data icv;
/* take a reader hold on the replay window for SQN validation */
195 rsn = rsn_acquire(sa);
198 for (i = 0; i != num; i++) {
200 hl = mb[i]->l2_len + mb[i]->l3_len;
201 rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
/* on success rc is the payload length (see inb_pkt_prepare) */
203 lksd_none_cop_prepare(cop[k], cs, mb[i]);
204 inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
210 rsn_release(sa, rsn);
212 /* copy not prepared mbufs beyond good ones */
213 if (k != num && k != 0) {
214 move_bad_mbufs(mb, dr, num, num - k);
222 * Start with processing inbound packet.
223 * This is common part for both tunnel and transport mode.
224 * Extract information that will be needed later from mbuf metadata and
225 * actual packet data:
226 * - mbuf for packet's last segment
227 * - length of the L2/L3 headers
228 * - esp tail structure
231 process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
232 struct esp_tail *espt, uint32_t *hlen)
234 const struct esp_tail *pt;
236 ml[0] = rte_pktmbuf_lastseg(mb);
237 hlen[0] = mb->l2_len + mb->l3_len;
/* ESP tail sits tlen bytes (ICV + esp_tail) before the end of the
 * last segment; NOTE(review): the copy of *pt into espt[0] is not
 * visible in this view */
238 pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *,
239 ml[0]->data_len - tlen);
244 * packet checks for transport mode:
245 * - no reported IPsec related failures in ol_flags
246 * - tail length is valid
247 * - padding bytes are valid
/* Returns non-zero if the packet FAILS any of the checks above. */
249 static inline int32_t
250 trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml,
251 struct esp_tail espt, uint32_t hlen, uint32_t tlen)
/* ofs < 0 below implies ofs is a signed type (declaration not visible
 * in this view): the tail must fit inside the last segment */
256 ofs = ml->data_len - tlen;
257 pd = rte_pktmbuf_mtod_offset(ml, const uint8_t *, ofs);
/* pad bytes must match the canonical ESP pad pattern (esp_pad_bytes) */
259 return ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
260 ofs < 0 || tlen + hlen > mb->pkt_len ||
261 (espt.pad_len != 0 && memcmp(pd, esp_pad_bytes, espt.pad_len)));
265 * packet checks for tunnel mode:
266 * - same as for transport mode
267 * - esp tail next proto contains expected for that SA value
269 static inline int32_t
270 tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml,
271 struct esp_tail espt, uint32_t hlen, const uint32_t tlen, uint8_t proto)
273 return (trs_process_check(mb, ml, espt, hlen, tlen) ||
274 espt.next_proto != proto);
278 * step two for tunnel mode:
279 * - read SQN value (for future use)
280 * - cut off ICV, ESP tail and padding bytes
281 * - cut off ESP header and IV, also if needed - L2/L3 headers
282 * (controlled by *adj* value)
/*
 * Returns pointer to the start of the trimmed packet.
 * NOTE(review): the store of ph->seq into sqn[0] and the pkt_len
 * adjustment matching the data_len trim are not visible in this view.
 */
285 tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
286 uint32_t adj, uint32_t tlen, uint32_t *sqn)
288 const struct esp_hdr *ph;
291 ph = rte_pktmbuf_mtod_offset(mb, const struct esp_hdr *, hlen);
294 /* cut off ICV, ESP tail and padding bytes */
295 ml->data_len -= tlen;
298 /* cut off L2/L3 headers, ESP header and IV */
299 return rte_pktmbuf_adj(mb, adj);
303 * step two for transport mode:
304 * - read SQN value (for future use)
305 * - cut off ICV, ESP tail and padding bytes
306 * - cut of ESP header and IV
307 * - move L2/L3 header to fill the gap after ESP header removal
/* Reuses tun_process_step2 for the trimming, then slides the L2/L3
 * headers forward over the removed ESP header + IV. */
310 trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
311 uint32_t adj, uint32_t tlen, uint32_t *sqn)
315 /* get start of the packet before modifications */
316 op = rte_pktmbuf_mtod(mb, char *);
318 /* cut off ESP header and IV */
319 np = tun_process_step2(mb, ml, hlen, adj, tlen, sqn);
321 /* move header bytes to fill the gap after ESP header removal */
322 remove_esph(np, op, hlen);
327 * step three for transport mode:
328 * update mbuf metadata:
333 trs_process_step3(struct rte_mbuf *mb)
335 /* reset mbuf packet type */
/* keep only the L2/L3 classification; inner L4 info is now stale */
336 mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
338 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
339 mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
343 * step three for tunnel mode:
344 * update mbuf metadata:
350 tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
352 /* reset mbuf metadata: L2/L3 len, packet type */
/* the decapsulated inner packet has unknown type; tx_offload gets the
 * SA's precomputed mask/value (sa->tx_offload, see tun_process) */
353 mb->packet_type = RTE_PTYPE_UNKNOWN;
354 mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
356 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
357 mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
362 * *process* function for tunnel packets
/*
 * Runs steps 1-3 over a burst: gather metadata first (to hide load
 * latency), then validate, trim and re-tag each packet.
 * NOTE(review): the adj computation, the sqn[]/dr[]/k bookkeeping in
 * the second loop and the return statement are not visible in this
 * view — confirm against the full file.
 */
364 static inline uint16_t
365 tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
366 uint32_t sqn[], uint32_t dr[], uint16_t num)
368 uint32_t adj, i, k, tl;
/* VLAs sized by the burst length (num is bounded by the caller) */
370 struct esp_tail espt[num];
371 struct rte_mbuf *ml[num];
/* total tail bytes to strip: ICV + esp_tail (+ per-packet pad below) */
373 const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
374 const uint32_t cofs = sa->ctp.cipher.offset;
377 * to minimize stalls due to load latency,
378 * read mbufs metadata and esp tail first.
380 for (i = 0; i != num; i++)
381 process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);
384 for (i = 0; i != num; i++) {
387 tl = tlen + espt[i].pad_len;
389 /* check that packet is valid */
390 if (tun_process_check(mb[i], ml[i], espt[i], adj, tl,
393 /* modify packet's layout */
394 tun_process_step2(mb[i], ml[i], hl[i], adj,
396 /* update mbuf's metadata */
397 tun_process_step3(mb[i], sa->tx_offload.msk,
409 * *process* function for transport packets
/*
 * Transport-mode counterpart of tun_process: after trimming, the L3
 * header is patched in place (length/next-proto) via update_trs_l3hdr.
 * NOTE(review): the np/l2 declarations, the l2 assignment, the
 * sqn[]/dr[]/k bookkeeping and the return statement are not visible
 * in this view — confirm against the full file.
 */
411 static inline uint16_t
412 trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
413 uint32_t sqn[], uint32_t dr[], uint16_t num)
416 uint32_t i, k, l2, tl;
418 struct esp_tail espt[num];
419 struct rte_mbuf *ml[num];
421 const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
422 const uint32_t cofs = sa->ctp.cipher.offset;
425 * to minimize stalls due to load latency,
426 * read mbufs metadata and esp tail first.
428 for (i = 0; i != num; i++)
429 process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]);
432 for (i = 0; i != num; i++) {
434 tl = tlen + espt[i].pad_len;
437 /* check that packet is valid */
438 if (trs_process_check(mb[i], ml[i], espt[i], hl[i] + cofs,
441 /* modify packet's layout */
442 np = trs_process_step2(mb[i], ml[i], hl[i], cofs, tl,
/* fix the L3 header: new payload length and next-proto from esp tail */
444 update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
445 l2, hl[i] - l2, espt[i].next_proto);
447 /* update mbuf's metadata */
448 trs_process_step3(mb[i]);
458 * for group of ESP inbound packets perform SQN check and update.
/*
 * sqn[] holds big-endian sequence numbers collected by the *process*
 * handler. NOTE(review): the dr[]/k bookkeeping for rejected SQNs and
 * the return statement are not visible in this view.
 */
460 static inline uint16_t
461 esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
462 uint32_t dr[], uint16_t num)
465 struct replay_sqn *rsn;
467 /* replay not enabled */
468 if (sa->replay.win_sz == 0)
/* take the writer side of the replay window for the whole burst */
471 rsn = rsn_update_start(sa);
474 for (i = 0; i != num; i++) {
475 if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
481 rsn_update_finish(sa, rsn);
486 * process group of ESP inbound packets.
/*
 * Common driver for both modes: run the mode-specific *process*
 * handler, compact out failed packets, then update the replay window
 * and compact out packets with bad SQNs. Good mbufs end up at the
 * front of mb[], bad ones after them.
 */
488 static inline uint16_t
489 esp_inb_pkt_process(const struct rte_ipsec_session *ss,
490 struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
493 struct rte_ipsec_sa *sa;
499 /* process packets, extract seq numbers */
500 k = process(sa, mb, sqn, dr, num);
502 /* handle unprocessed mbufs */
503 if (k != num && k != 0)
504 move_bad_mbufs(mb, dr, num, num - k);
506 /* update SQN and replay window */
507 n = esp_inb_rsn_update(sa, sqn, dr, k);
509 /* handle mbufs with wrong SQN */
510 if (n != k && n != 0)
511 move_bad_mbufs(mb, dr, k, k - n);
520 * process group of ESP inbound tunnel packets.
/* Thin wrapper binding the common driver to the tunnel-mode handler. */
523 esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
524 struct rte_mbuf *mb[], uint16_t num)
526 return esp_inb_pkt_process(ss, mb, num, tun_process);
530 * process group of ESP inbound transport packets.
/* Thin wrapper binding the common driver to the transport-mode handler. */
533 esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
534 struct rte_mbuf *mb[], uint16_t num)
536 return esp_inb_pkt_process(ss, mb, num, trs_process);