/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */
#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"
#define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
#define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};
/*
 * helper routine, fills internal crypto_xform structure.
 */
static int
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	if (xf == NULL)
		return -EINVAL;

	xfn = xf->next;
	/* for AEAD just one xform is required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xfn != NULL)
			return -EINVAL;
		xform->aead = &xf->aead;
	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;
	} else {

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}

	return 0;
}
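/*
 * Illustrative sketch only (not part of the library): one way an
 * application could chain the two symmetric xforms for an inbound SA
 * before calling rte_ipsec_sa_init(). The function name and all field
 * values below are hypothetical placeholders.
 */
#if 0
static void
example_inb_xform_chain(struct rte_ipsec_sa_prm *prm,
	struct rte_crypto_sym_xform xf[2])
{
	/* inbound expects AUTH first, then CIPHER */
	memset(xf, 0, 2 * sizeof(xf[0]));
	xf[0].type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xf[0].auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	xf[0].next = &xf[1];
	xf[1].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xf[1].cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	xf[1].next = NULL;
	prm->crypto_xform = xf;
}
#endif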
uint64_t
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
	return sa->type;
}
/*
 * Based on the number of buckets, calculate the required size of the
 * structure that holds the replay window and sequence number (RSN)
 * information.
 */
static size_t
rsn_size(uint32_t nb_bucket)
{
	size_t sz;
	struct replay_sqn *rsn;

	sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
	sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
	return sz;
}
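/*
 * Example (assuming 8-byte window[] entries and a 64-byte cache line):
 * for nb_bucket == 4 the raw size is sizeof(*rsn) + 32 bytes, which is
 * then rounded up to the next multiple of RTE_CACHE_LINE_SIZE.
 */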
/*
 * for a given window size, calculate the required number of buckets.
 */
static uint32_t
replay_num_bucket(uint32_t wsz)
{
	uint32_t nb;

	nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
		WINDOW_BUCKET_SIZE);
	nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);

	return nb;
}
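/*
 * E.g., assuming WINDOW_BUCKET_SIZE == 64: wsz == 128 yields 2 buckets,
 * while wsz == 129 is first rounded up to 3 buckets and then to the
 * next power of two, i.e. 4.
 */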
static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	uint32_t n, sz, wsz;

	wsz = *wnd_sz;
	n = 0;

	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as the minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure the window is at least 64 when ESN is enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
		if (wsz != 0)
			n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	*wnd_sz = wsz;
	*nb_bucket = n;

	sz = rsn_size(n);
	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
	return sz;
}
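/*
 * Note: in SQN_ATOM mode REPLAY_SQN_NUM copies of the RSN are kept, so
 * that writers can update one instance while readers keep using the
 * other; hence the multiplied size above and the second rsn[] entry
 * set up in fill_sa_replay() below.
 */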
void
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}
/*
 * Determine expected SA type based on input parameters.
 */
static int
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	uint64_t tp;

	tp = 0;

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;
	else
		return -EINVAL;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;
	else
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
		else
			return -EINVAL;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else
		return -EINVAL;

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* check for ECN flag */
	if (prm->ipsec_xform.options.ecn == 0)
		tp |= RTE_IPSEC_SATP_ECN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ECN_ENABLE;

	/* check for DSCP flag */
	if (prm->ipsec_xform.options.copy_dscp == 0)
		tp |= RTE_IPSEC_SATP_DSCP_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_DSCP_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;

	*type = tp;
	return 0;
}
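/*
 * For example, an ingress ESP SA carried in an IPv4-over-IPv4 tunnel
 * with ESN off would come out as RTE_IPSEC_SATP_PROTO_ESP |
 * RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4 |
 * RTE_IPSEC_SATP_IPV4 | RTE_IPSEC_SATP_ESN_DISABLE, plus the
 * corresponding ECN/DSCP/SQN bits.
 */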
/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may differ with new algorithms support */
	sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;

	/*
	 * for AEAD and NULL algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_NULL:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = 0;
		sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
		sa->cofs.ofs.cipher.tail = sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
}
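/*
 * Worked example (assuming AES-GCM with an 8-byte IV): the ciphertext
 * starts sizeof(struct rte_esp_hdr) + 8 == 16 bytes past the start of
 * the ESP header, so cipher.offset == 16; since the auth and cipher
 * regions coincide for AEAD, cofs.ofs.cipher.head == 0.
 */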
/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}
/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
{
	uint8_t algo_type;

	algo_type = sa->algo_type;

	/*
	 * Setup auth and cipher length and offset.
	 * These params may differ when new algorithms are added.
	 */
	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CTR:
	case ALGO_TYPE_NULL:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
		break;
	}

	/*
	 * for AEAD and NULL algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_NULL:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = hlen;
		sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
			sa->iv_len + sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
		(sa->ctp.cipher.offset + sa->ctp.cipher.length);
}
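/*
 * Worked example (a sketch, assuming AES-CBC with HMAC and ESN on):
 * auth starts at the ESP header while the cipher region starts right
 * after it, so head == sizeof(struct rte_esp_hdr) == 8; the tail works
 * out to sa->sqh_len, the ESN high word that is authenticated but
 * never encrypted or transmitted.
 */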
/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;

	/* update l2_len and l3_len fields for outbound mbuf */
	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);

	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);

	esp_outb_init(sa, sa->hdr_len);
}
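/*
 * Note: rte_mbuf_tx_offload() packs its arguments (l2_len, l3_len,
 * l4_len, tso_segsz, outer_l3_len, outer_l2_len, unused) into a single
 * 64-bit value, so the call above presets l2_len to the L3 offset of
 * the prepended tunnel header and l3_len to the rest of that header.
 */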
/*
 * helper function, init SA structure.
 */
static int
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	if (prm->ipsec_xform.options.ecn)
		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;

	if (prm->ipsec_xform.options.copy_dscp)
		sa->tos_mask |= RTE_IPV4_HDR_DSCP_MASK;
	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			/* RFC 4106 */
			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;
			break;
		default:
			return -EINVAL;
		}
	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;
		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->iv_len = 0;
			sa->algo_type = ALGO_TYPE_NULL;
			break;
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			/* RFC 3686 */
			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;
			break;
		case RTE_CRYPTO_CIPHER_3DES_CBC:
			/* RFC 1851 */
			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;
			break;
		default:
			return -EINVAL;
		}
	}
	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	/* preserve all values except l2_len and l3_len */
	sa->tx_offload.msk =
		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
				0, 0, 0, 0, 0);
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0);
		break;
	}

	return 0;
}
/*
 * helper function, init SA replay structure.
 */
static void
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
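/*
 * Note: bucket_index_mask relies on nb_bucket being a power of two,
 * which replay_num_bucket() guarantees; the RSN instance(s) live
 * immediately after the rte_ipsec_sa itself ((sa + 1)), matching the
 * layout sized in ipsec_sa_size().
 */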
int
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	uint64_t type;
	uint32_t nb, wsz;
	int32_t rc;

	if (prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}
int
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	int32_t rc, sz;
	uint32_t nb, wsz;
	uint64_t type;
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			prm->tun.hdr_len > sizeof(sa->hdr))
		return -EINVAL;

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	sa->type = type;
	sa->size = sz;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0)
		rte_ipsec_sa_fini(sa);

	/* fill replay window related fields */
	if (nb != 0)
		fill_sa_replay(sa, wsz, nb);

	return sz;
}
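/*
 * Typical usage (a sketch, not part of the library): query the size,
 * allocate zeroed cache-aligned memory, then initialize in place:
 *
 *	int sz = rte_ipsec_sa_size(&prm);
 *	struct rte_ipsec_sa *sa = rte_zmalloc(NULL, sz,
 *		RTE_CACHE_LINE_SIZE);
 *	int rc = rte_ipsec_sa_init(sa, &prm, sz);
 *
 * Error handling is omitted here for brevity.
 */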
/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
static inline void
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		sop->m_src = mb[i];
		__rte_security_attach_session(sop, ss->security.ses);
	}
}
/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by the PMD/HW; SW only has to prepare the crypto ops.
 */
static uint16_t
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	lksd_proto_cop_prepare(ss, mb, cop, num);
	return num;
}
/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD;
 * just check the mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
static uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, k;
	uint32_t dr[num];

	RTE_SET_USED(ss);

	k = 0;
	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
			k++;
		else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}
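/*
 * move_bad_mbufs() compacts the array: the k good mbufs stay at the
 * front (in their original order) and the num - k failed ones are
 * moved to the tail, so a caller can free or log them starting at
 * mb[k].
 */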
/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
static int
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
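/*
 * Note: for outbound SAs, esp_outb_sqh_process is only needed when
 * sqh_len is non-zero, i.e. when a 32-bit ESN high word is appended
 * for ICV computation and has to be stripped afterwards; otherwise the
 * cheaper pkt_flag_process() check is sufficient.
 */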
/*
 * Select packet processing function for session on CPU_CRYPTO
 * type of device.
 */
static int
cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_outb_tun_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_outb_trs_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
static int
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
/*
 * Select packet processing functions for the given session, based on
 * the SA parameters and the type of device associated with the session.
 */
int
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	rc = 0;
	pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };

	switch (ss->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = inline_proto_outb_pkt_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare.async = lksd_proto_prepare;
		pf->process = pkt_flag_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		rc = cpu_crypto_pkt_func_select(sa, pf);
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
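/*
 * This selector runs from rte_ipsec_session_prepare(): once it
 * succeeds, the chosen prepare/process callbacks are what
 * rte_ipsec_pkt_crypto_prepare() and rte_ipsec_pkt_process() dispatch
 * to on the data path.
 */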