/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"
/*
 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
 */
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;

	/* fill sym op fields */
	sop = cop->sym;
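	/*
	 * rough layout of an inbound ESP packet and the data regions set up
	 * below (illustrative only; the actual offsets and lengths come from
	 * the per-SA values precomputed in sa->ctp):
	 *
	 *   pofs -> | ESP hdr | IV | payload ... padding | ESP tail | ICV |
	 *           |<---------------- auth.data ----------------->|
	 *                          |<----- cipher/aead.data ------>|
	 *
	 * (for ESN SAs the auth region also covers the inserted SQN.hi)
	 * the digest and, for AES-GCM, the AAD pointers refer to the end of
	 * the last mbuf segment as prepared by inb_pkt_prepare() and
	 * inb_pkt_xprepare(), while the IV, GCM nonce or CTR counter block
	 * is written into the crypto op itself at sa->iv_ofs.
	 */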
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
		sop->aead.data.length = plen - sa->ctp.cipher.length;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AEAD IV (located inside the crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = plen - sa->ctp.cipher.length;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy IV from the input packet into the crypto op */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = plen - sa->ctp.cipher.length;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* fill CTR counter block (located inside the crypto op)
		 * with the IV from the input packet and the SA salt
		 */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = plen - sa->ctp.cipher.length;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
		break;
	}
}
/*
 * for pure cryptodev (lookaside none), depending on the SA settings
 * we might have to write some extra data into the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between the ESP trailer and the ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
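	/*
	 * rough view of the last segment after the two steps in this
	 * function (illustrative; the exact layout depends on the SA):
	 *
	 *   ... padding | pad_len | next_proto | SQN.hi | ICV | AAD ...
	 *                                                ^icv->va
	 *
	 * insert_sqh() shifts the received ICV up by sqh_len bytes and
	 * writes the high 32 bits of the ESN just before it, so that the
	 * crypto device authenticates them together; the AAD, if needed,
	 * is built in the free tailroom right after the ICV.
	 */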
	/*
	 * fill AAD fields, if any (the AAD is placed right after the ICV);
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}
/*
 * setup/update packet data and metadata for ESP inbound tunnel case.
 */
static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
{
	int32_t rc;
	uint64_t sqn;
	uint32_t clen, icv_ofs, plen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct the SQN, check it against the replay
	 * window, then convert it back into network byte order.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

	rc = esn_inb_check_sqn(rsn, sa, sqn);
	if (rc != 0)
		return rc;

	sqn = rte_cpu_to_be_64(sqn);
	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	ml = rte_pktmbuf_lastseg(mb);
	icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
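	/*
	 * note: the received ICV starts icv_len bytes before the end of the
	 * last segment; adding sqh_len makes icv_ofs point at the place the
	 * ICV will occupy after inb_pkt_xprepare() squeezes SQN.hi in front
	 * of it (sqh_len is 0 for non-ESN SAs).
	 */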
	/* check that the packet has a valid length */
	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EBADMSG;

	/* we have to allocate space for the AAD somewhere;
	 * for now just use the free trailing space of the last segment.
	 * It would probably be more convenient to reserve space for the AAD
	 * inside rte_crypto_op itself
	 * (space for the IV is already reserved inside the cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	inb_pkt_xprepare(sa, sqn, icv);
	return plen;
}
/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
uint16_t
esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0) {
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
			k++;
		} else
			dr[i - k] = i;
	}

	rsn_release(sa, rsn);

	/* move mbufs that were not prepared beyond the good ones */
	if (k != num && k != 0) {
		move_bad_mbufs(mb, dr, num, num - k);
		rte_errno = EBADMSG;
	}

	return k;
}
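/*
 * esp_inb_pkt_prepare() above is the handler behind
 * rte_ipsec_pkt_crypto_prepare() for inbound lookaside-none sessions.
 * A minimal sketch of the surrounding flow an application is expected to
 * follow (illustrative only: variable names are made up, crypto-op
 * allocation and error handling are omitted, and the mbufs for the
 * process step are normally taken back from the dequeued crypto ops):
 *
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, k);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, RTE_DIM(cop));
 *	n = rte_ipsec_pkt_process(ss, mb, n);
 */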
/*
 * process ESP inbound tunnel packet.
 */
static inline int
inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *sqn)
{
	uint32_t hlen, icv_len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;
	char *pd;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -EBADMSG;

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/*
	 * check padding and next proto;
	 * return an error if something is wrong.
	 */
	pd = (char *)espt - espt->pad_len;
	if (espt->next_proto != sa->proto ||
			memcmp(pd, esp_pad_bytes, espt->pad_len))
		return -EINVAL;

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* cut off L2/L3 headers, ESP header and IV */
	hlen = mb->l2_len + mb->l3_len;
	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
	rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
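	/*
	 * ctp.cipher.offset accounts for the ESP header plus the IV, so the
	 * single rte_pktmbuf_adj() above strips the outer L2/L3 headers,
	 * the ESP header and the IV in one go, leaving the decrypted inner
	 * packet at the start of the mbuf.
	 */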
	/* retrieve SQN for later check */
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
	return 0;
}
/*
 * process ESP inbound transport packet.
 */
static inline int
inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *sqn)
{
	uint32_t hlen, icv_len, l2len, l3len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;
	char *np, *op, *pd;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -EBADMSG;

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/* check padding, return an error if something is wrong. */
	pd = (char *)espt - espt->pad_len;
	if (memcmp(pd, esp_pad_bytes, espt->pad_len))
		return -EINVAL;

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* retrieve SQN for later check */
	l2len = mb->l2_len;
	l3len = mb->l3_len;
	hlen = l2len + l3len;
	op = rte_pktmbuf_mtod(mb, char *);
	esph = (struct esp_hdr *)(op + hlen);
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* cut off ESP header and IV, update L3 header */
	np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
	remove_esph(np, op, hlen);
	update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
		espt->next_proto);
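	/*
	 * transport mode reconstruction (illustrative):
	 *
	 *   before: | L2 | L3 | ESP hdr | IV | payload | ...
	 *   after:            | L2 | L3 | payload | ...
	 *
	 * rte_pktmbuf_adj() trims ctp.cipher.offset (ESP header plus IV)
	 * bytes from the front, remove_esph() copies the original L2/L3
	 * headers forward so that they again precede the payload, and
	 * update_trs_l3hdr() patches the L3 header length and next-proto
	 * fields.
	 */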
	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
	return 0;
}
/*
 * for a group of ESP inbound packets, perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	uint32_t dr[], uint16_t num)
{
	uint32_t i, k;
	struct replay_sqn *rsn;

	rsn = rsn_update_start(sa);

	k = 0;
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
			k++;
		else
			dr[i - k] = i;
	}

	rsn_update_finish(sa, rsn);
	return k;
}
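/*
 * note that the replay window is updated only at this point, i.e. after
 * the crypto device has verified the ICV: inb_pkt_prepare() merely checks
 * the SQN against the window before the packet is submitted for
 * decryption, and the per-packet process functions above just stash the
 * SQN for this deferred update.
 */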
/*
 * process a group of ESP inbound tunnel packets.
 */
uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, k, n;
	struct rte_ipsec_sa *sa;
	uint32_t sqn[num];
	uint32_t dr[num];

	sa = ss->sa;

	/* process packets, extract seq numbers */
	k = 0;
	for (i = 0; i != num; i++) {
		/* good packet */
		if (inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
			k++;
		/* bad packet, will be dropped from further processing */
		else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* update SQN and replay window */
	n = esp_inb_rsn_update(sa, sqn, dr, k);

	/* handle mbufs with wrong SQN */
	if (n != k && n != 0)
		move_bad_mbufs(mb, dr, k, k - n);

	if (n != num)
		rte_errno = EBADMSG;

	return n;
}
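/*
 * both esp_inb_tun_pkt_process() above and esp_inb_trs_pkt_process()
 * below return the number of packets that passed ESP reconstruction and
 * the SQN check; failed packets are moved beyond the good ones by
 * move_bad_mbufs() and are left for the caller to handle.
 */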
/*
 * process a group of ESP inbound transport packets.
 */
uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, k, n;
	struct rte_ipsec_sa *sa;
	uint32_t sqn[num];
	uint32_t dr[num];

	sa = ss->sa;

	/* process packets, extract seq numbers */
	k = 0;
	for (i = 0; i != num; i++) {
		/* good packet */
		if (inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
			k++;
		/* bad packet, will be dropped from further processing */
		else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	/* update SQN and replay window */
	n = esp_inb_rsn_update(sa, sqn, dr, k);

	/* handle mbufs with wrong SQN */
	if (n != k && n != 0)
		move_bad_mbufs(mb, dr, k, k - n);

	if (n != num)
		rte_errno = EBADMSG;

	return n;
}