1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
9 #include <rte_cryptodev.h>
12 #include "ipsec_sqn.h"
17 #define MBUF_MAX_L2_LEN RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
18 #define MBUF_MAX_L3_LEN RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
20 /* some helper structures */
22 struct rte_crypto_auth_xform *auth;
23 struct rte_crypto_cipher_xform *cipher;
24 struct rte_crypto_aead_xform *aead;
28 * helper routine, fills internal crypto_xform structure.
31 fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
32 const struct rte_ipsec_sa_prm *prm)
34 struct rte_crypto_sym_xform *xf, *xfn;
36 memset(xform, 0, sizeof(*xform));
38 xf = prm->crypto_xform;
44 /* for AEAD just one xform required */
45 if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
48 xform->aead = &xf->aead;
50 * CIPHER+AUTH xforms are expected in strict order,
51 * depending on SA direction:
52 * inbound: AUTH+CIPHER
53 * outbound: CIPHER+AUTH
55 } else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
57 /* wrong order or no cipher */
58 if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
59 xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
62 xform->auth = &xf->auth;
63 xform->cipher = &xfn->cipher;
67 /* wrong order or no auth */
68 if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
69 xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
72 xform->cipher = &xf->cipher;
73 xform->auth = &xfn->auth;
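/*
 * Usage sketch (added for illustration, not part of the original file):
 * how an application might chain the crypto xforms so that the ordering
 * rule above is satisfied for an inbound (AUTH first, then CIPHER) SA.
 * The function and variable names here are hypothetical.
 */
static void
example_inb_xform_chain(struct rte_crypto_sym_xform *auth_xf,
	struct rte_crypto_sym_xform *ciph_xf, struct rte_ipsec_sa_prm *prm)
{
	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->next = ciph_xf;
	ciph_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	ciph_xf->next = NULL;
	/* fill_crypto_xform() picks auth from xf and cipher from xf->next */
	prm->crypto_xform = auth_xf;
}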
79 uint64_t __rte_experimental
80 rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
86 ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
93 if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
96	 * RFC 4303 recommends 64 as the minimum window size.
97	 * there is no point in using ESN mode without an SQN window,
98	 * so make sure the window is at least 64 when ESN is enabled.
100 wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
101 RTE_IPSEC_SATP_ESN_DISABLE) ?
102 wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
104 n = replay_num_bucket(wsz);
107 if (n > WINDOW_BUCKET_MAX)
114 if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
115 sz *= REPLAY_SQN_NUM;
117 sz += sizeof(struct rte_ipsec_sa);
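/*
 * Sketch (added for illustration, not part of the original file): the
 * effective window size produced by the clamp above. With ESN enabled an
 * application may leave prm->replay_win_sz at zero and still get a usable
 * window. The helper name is hypothetical.
 */
static uint32_t
example_effective_win_sz(uint64_t type, uint32_t wsz)
{
	if ((type & RTE_IPSEC_SATP_ESN_MASK) == RTE_IPSEC_SATP_ESN_DISABLE)
		return wsz;
	/* ESN enabled: never use a window smaller than WINDOW_BUCKET_SIZE */
	return RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
}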
121 void __rte_experimental
122 rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
124 memset(sa, 0, sa->size);
128 * Determine expected SA type based on input parameters.
131 fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
137 if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
138 tp |= RTE_IPSEC_SATP_PROTO_AH;
139 else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
140 tp |= RTE_IPSEC_SATP_PROTO_ESP;
144 if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
145 tp |= RTE_IPSEC_SATP_DIR_OB;
146 else if (prm->ipsec_xform.direction ==
147 RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
148 tp |= RTE_IPSEC_SATP_DIR_IB;
152 if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
153 if (prm->ipsec_xform.tunnel.type ==
154 RTE_SECURITY_IPSEC_TUNNEL_IPV4)
155 tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
156 else if (prm->ipsec_xform.tunnel.type ==
157 RTE_SECURITY_IPSEC_TUNNEL_IPV6)
158 tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
162 if (prm->tun.next_proto == IPPROTO_IPIP)
163 tp |= RTE_IPSEC_SATP_IPV4;
164 else if (prm->tun.next_proto == IPPROTO_IPV6)
165 tp |= RTE_IPSEC_SATP_IPV6;
168 } else if (prm->ipsec_xform.mode ==
169 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
170 tp |= RTE_IPSEC_SATP_MODE_TRANS;
171 if (prm->trs.proto == IPPROTO_IPIP)
172 tp |= RTE_IPSEC_SATP_IPV4;
173 else if (prm->trs.proto == IPPROTO_IPV6)
174 tp |= RTE_IPSEC_SATP_IPV6;
180 /* check for ESN flag */
181 if (prm->ipsec_xform.options.esn == 0)
182 tp |= RTE_IPSEC_SATP_ESN_DISABLE;
184 tp |= RTE_IPSEC_SATP_ESN_ENABLE;
186 /* interpret flags */
187 if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
188 tp |= RTE_IPSEC_SATP_SQN_ATOM;
190 tp |= RTE_IPSEC_SATP_SQN_RAW;
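/*
 * Sketch (added for illustration, not part of the original file): parameter
 * values that fill_sa_type() above would translate into
 * RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
 * RTE_IPSEC_SATP_ESN_DISABLE. The function name is hypothetical.
 */
static void
example_inb_tun4_type_prm(struct rte_ipsec_sa_prm *prm)
{
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	prm->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	prm->ipsec_xform.options.esn = 0;
	prm->tun.next_proto = IPPROTO_IPIP;	/* encapsulated packet is IPv4 */
}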
197 * Init ESP inbound specific things.
200 esp_inb_init(struct rte_ipsec_sa *sa)
202	/* these params may change when support for new algorithms is added */
203 sa->ctp.auth.offset = 0;
204 sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
205 sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
206 sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
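/*
 * Layout reminder (added for clarity, not part of the original file):
 * both offsets above are relative to the start of the ESP header of the
 * received packet:
 *
 *   ESP hdr | IV | payload ... padding | ESP tail | ICV
 *   ^            ^
 *   auth (0)     cipher (sizeof(esp_hdr) + iv_len)
 */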
210 * Init ESP inbound tunnel specific things.
213 esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
215 sa->proto = prm->tun.next_proto;
220 * Init ESP outbound specific things.
223 esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
227 sa->sqn.outb.raw = 1;
229	/* these params may change when support for new algorithms is added */
230 sa->ctp.auth.offset = hlen;
231 sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
233 algo_type = sa->algo_type;
236 case ALGO_TYPE_AES_GCM:
237 case ALGO_TYPE_AES_CTR:
239 sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
241 sa->ctp.cipher.length = 0;
243 case ALGO_TYPE_AES_CBC:
244 case ALGO_TYPE_3DES_CBC:
245 sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
246 sa->ctp.cipher.length = sa->iv_len;
252 * Init ESP outbound tunnel specific things.
255 esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
257 sa->proto = prm->tun.next_proto;
258 sa->hdr_len = prm->tun.hdr_len;
259 sa->hdr_l3_off = prm->tun.hdr_l3_off;
261 /* update l2_len and l3_len fields for outbound mbuf */
262 sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
263 sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
265 memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
267 esp_outb_init(sa, sa->hdr_len);
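/*
 * Sketch (added for illustration, not part of the original file): a minimal
 * IPv4 tunnel header description consumed by esp_outb_tun_init() above.
 * With hdr_l3_off == 0 the whole prepended header is treated as L3, so the
 * tx_offload template becomes l2_len = 0, l3_len = sizeof(struct ipv4_hdr).
 * Assumes rte_ip.h is included; the function name is hypothetical.
 */
static void
example_outb_tun4_hdr_prm(struct rte_ipsec_sa_prm *prm,
	const struct ipv4_hdr *outer)
{
	prm->tun.hdr = outer;
	prm->tun.hdr_len = sizeof(*outer);
	prm->tun.hdr_l3_off = 0;
	prm->tun.next_proto = IPPROTO_IPIP;
}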
271 * helper function, init SA structure.
274 esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
275 const struct crypto_xform *cxf)
277 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
278 RTE_IPSEC_SATP_MODE_MASK;
280 if (cxf->aead != NULL) {
281 switch (cxf->aead->algo) {
282 case RTE_CRYPTO_AEAD_AES_GCM:
284 sa->aad_len = sizeof(struct aead_gcm_aad);
285 sa->icv_len = cxf->aead->digest_length;
286 sa->iv_ofs = cxf->aead->iv.offset;
287 sa->iv_len = sizeof(uint64_t);
288 sa->pad_align = IPSEC_PAD_AES_GCM;
289 sa->algo_type = ALGO_TYPE_AES_GCM;
295 sa->icv_len = cxf->auth->digest_length;
296 sa->iv_ofs = cxf->cipher->iv.offset;
297 sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
299 switch (cxf->cipher->algo) {
300 case RTE_CRYPTO_CIPHER_NULL:
301 sa->pad_align = IPSEC_PAD_NULL;
303 sa->algo_type = ALGO_TYPE_NULL;
306 case RTE_CRYPTO_CIPHER_AES_CBC:
307 sa->pad_align = IPSEC_PAD_AES_CBC;
308 sa->iv_len = IPSEC_MAX_IV_SIZE;
309 sa->algo_type = ALGO_TYPE_AES_CBC;
312 case RTE_CRYPTO_CIPHER_AES_CTR:
314 sa->pad_align = IPSEC_PAD_AES_CTR;
315 sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
316 sa->algo_type = ALGO_TYPE_AES_CTR;
319 case RTE_CRYPTO_CIPHER_3DES_CBC:
321 sa->pad_align = IPSEC_PAD_3DES_CBC;
322 sa->iv_len = IPSEC_3DES_IV_SIZE;
323 sa->algo_type = ALGO_TYPE_3DES_CBC;
331 sa->udata = prm->userdata;
332 sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
333 sa->salt = prm->ipsec_xform.salt;
335 /* preserve all values except l2_len and l3_len */
337 ~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
340 switch (sa->type & msk) {
341 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
342 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
343 esp_inb_tun_init(sa, prm);
345 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
348 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
349 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
350 esp_outb_tun_init(sa, prm);
352 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
353 esp_outb_init(sa, 0);
361 * helper function, init SA replay structure.
364 fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
366 sa->replay.win_sz = wnd_sz;
367 sa->replay.nb_bucket = nb_bucket;
368 sa->replay.bucket_index_mask = nb_bucket - 1;
369 sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
370 if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
371 sa->sqn.inb.rsn[1] = (struct replay_sqn *)
372 ((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
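/*
 * Resulting memory layout (added for clarity, not part of the original
 * file):
 *
 *   struct rte_ipsec_sa | replay_sqn window [0] | replay_sqn window [1]
 *
 * the second window copy exists only for RTE_IPSEC_SATP_SQN_ATOM
 * (REPLAY_SQN_NUM copies), each sized by rsn_size(nb_bucket).
 */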
375 int __rte_experimental
376 rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
385 /* determine SA type */
386 rc = fill_sa_type(prm, &type);
390 /* determine required size */
391 wsz = prm->replay_win_sz;
392 return ipsec_sa_size(type, &wsz, &nb);
395 int __rte_experimental
396 rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
402 struct crypto_xform cxf;
404 if (sa == NULL || prm == NULL)
407 /* determine SA type */
408 rc = fill_sa_type(prm, &type);
412 /* determine required size */
413 wsz = prm->replay_win_sz;
414 sz = ipsec_sa_size(type, &wsz, &nb);
417 else if (size < (uint32_t)sz)
420 /* only esp is supported right now */
421 if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
424 if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
425 prm->tun.hdr_len > sizeof(sa->hdr))
428 rc = fill_crypto_xform(&cxf, type, prm);
438 /* check for ESN flag */
439 sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
440 UINT32_MAX : UINT64_MAX;
442 rc = esp_sa_init(sa, prm, &cxf);
444 rte_ipsec_sa_fini(sa);
446 /* fill replay window related fields */
448 fill_sa_replay(sa, wsz, nb);
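/*
 * Usage sketch (added for illustration, not part of the original file):
 * typical SA creation by an application. Error handling is trimmed,
 * rte_malloc.h is assumed to be available and the function name is
 * hypothetical.
 */
static struct rte_ipsec_sa *
example_sa_create(const struct rte_ipsec_sa_prm *prm)
{
	int32_t sz;
	struct rte_ipsec_sa *sa;

	sz = rte_ipsec_sa_size(prm);
	if (sz < 0)
		return NULL;

	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (sa == NULL)
		return NULL;

	if (rte_ipsec_sa_init(sa, prm, sz) < 0) {
		rte_free(sa);
		sa = NULL;
	}
	return sa;
}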
454 * Move bad (unprocessed) mbufs beyond the good (processed) ones.
455 * bad_idx[] contains the indexes of bad mbufs inside the mb[].
458 move_bad_mbufs(struct rte_mbuf *mb[], const uint32_t bad_idx[], uint32_t nb_mb,
462 struct rte_mbuf *drb[nb_bad];
467 /* copy bad ones into a temp place */
468 for (i = 0; i != nb_mb; i++) {
469 if (j != nb_bad && i == bad_idx[j])
475 /* copy bad ones after the good ones */
476 for (i = 0; i != nb_bad; i++)
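/*
 * Example (added for clarity, not part of the original file): with
 * mb[] = {A, B, C, D} and bad_idx[] = {1, 3} the array becomes
 * {A, C, B, D}: good mbufs are compacted to the front, bad ones follow
 * in their original relative order.
 */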
481 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
484 lksd_none_cop_prepare(struct rte_crypto_op *cop,
485 struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
487 struct rte_crypto_sym_op *sop;
490 cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
491 cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
492 cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
494 __rte_crypto_sym_op_attach_sym_session(sop, cs);
498 * setup crypto op and crypto sym op for ESP outbound packet.
501 esp_outb_cop_prepare(struct rte_crypto_op *cop,
502 const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
503 const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
505 struct rte_crypto_sym_op *sop;
506 struct aead_gcm_iv *gcm;
507 struct aesctr_cnt_blk *ctr;
508 uint8_t algo_type = sa->algo_type;
510 /* fill sym op fields */
514 case ALGO_TYPE_AES_CBC:
515 /* Cipher-Auth (AES-CBC *) case */
516 case ALGO_TYPE_3DES_CBC:
517 /* Cipher-Auth (3DES-CBC *) case */
520 sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
521 sop->cipher.data.length = sa->ctp.cipher.length + plen;
522 sop->auth.data.offset = sa->ctp.auth.offset + hlen;
523 sop->auth.data.length = sa->ctp.auth.length + plen;
524 sop->auth.digest.data = icv->va;
525 sop->auth.digest.phys_addr = icv->pa;
527 case ALGO_TYPE_AES_GCM:
528 /* AEAD (AES_GCM) case */
529 sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
530 sop->aead.data.length = sa->ctp.cipher.length + plen;
531 sop->aead.digest.data = icv->va;
532 sop->aead.digest.phys_addr = icv->pa;
533 sop->aead.aad.data = icv->va + sa->icv_len;
534 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
536 /* fill AAD IV (located inside crypto op) */
537 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
539 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
541 case ALGO_TYPE_AES_CTR:
542 /* Cipher-Auth (AES-CTR *) case */
543 sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
544 sop->cipher.data.length = sa->ctp.cipher.length + plen;
545 sop->auth.data.offset = sa->ctp.auth.offset + hlen;
546 sop->auth.data.length = sa->ctp.auth.length + plen;
547 sop->auth.digest.data = icv->va;
548 sop->auth.digest.phys_addr = icv->pa;
550 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
552 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
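/*
 * Sketch (added for illustration, not part of the original file): the IV or
 * counter block filled at sa->iv_ofs above lives in the crypto op private
 * area, so the application has to reserve room for it when creating the op
 * pool and point the xform's iv.offset past the op and sym op structures.
 * Pool name and sizes are hypothetical.
 */
static struct rte_mempool *
example_cop_pool_create(int socket_id)
{
	/* 16B of per-op private space is enough for the IV / counter block */
	return rte_crypto_op_pool_create("ipsec_cops",
		RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 16, socket_id);
}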
560 * setup/update packet data and metadata for ESP outbound tunnel case.
562 static inline int32_t
563 esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
564 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
565 union sym_op_data *icv)
567 uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
569 struct esp_hdr *esph;
570 struct esp_tail *espt;
574 /* calculate extra header space required */
575 hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
577 /* size of ipsec protected data */
579 plen = mb->pkt_len - l2len;
581 /* number of bytes to encrypt */
582 clen = plen + sizeof(*espt);
583 clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
585 /* pad length + esp tail */
587 tlen = pdlen + sa->icv_len;
589 /* do append and prepend */
590 ml = rte_pktmbuf_lastseg(mb);
591 if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
595 ph = rte_pktmbuf_prepend(mb, hlen - l2len);
600 pdofs = ml->data_len;
601 ml->data_len += tlen;
603 pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
605 /* update pkt l2/l3 len */
606 mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
609 /* copy tunnel pkt header */
610 rte_memcpy(ph, sa->hdr, sa->hdr_len);
612 /* update original and new ip header fields */
613 update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
616 /* update spi, seqn and iv */
617 esph = (struct esp_hdr *)(ph + sa->hdr_len);
618 iv = (uint64_t *)(esph + 1);
619 copy_iv(iv, ivp, sa->iv_len);
622 esph->seq = sqn_low32(sqc);
625 pdofs += pdlen + sa->sqh_len;
628 pdlen -= sizeof(*espt);
630 /* copy padding data */
631 rte_memcpy(pt, esp_pad_bytes, pdlen);
633 /* update esp trailer */
634 espt = (struct esp_tail *)(pt + pdlen);
635 espt->pad_len = pdlen;
636 espt->next_proto = sa->proto;
638 icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
639 icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
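/*
 * Resulting packet layout (added for clarity, not part of the original
 * file):
 *
 *   tunnel hdr | ESP hdr | IV | original IP packet | padding | ESP tail
 *
 * with icv pointing at the ICV area reserved at the end of the last
 * segment (preceded by the SQN high bits when ESN is in use, see
 * outb_pkt_xprepare() below).
 */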
645	 * for pure cryptodev (lookaside none), depending on SA settings,
646 * we might have to write some extra data to the packet.
649 outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
650 const union sym_op_data *icv)
653 struct aead_gcm_aad *aad;
654 uint8_t algo_type = sa->algo_type;
656 /* insert SQN.hi between ESP trailer and ICV */
657 if (sa->sqh_len != 0) {
658 psqh = (uint32_t *)(icv->va - sa->sqh_len);
659 psqh[0] = sqn_hi32(sqc);
663 * fill IV and AAD fields, if any (aad fields are placed after icv),
664	 * right now we support only one AEAD algorithm: AES-GCM.
666 if (algo_type == ALGO_TYPE_AES_GCM) {
667 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
668 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
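/*
 * Note (added for clarity, not part of the original file): for non-AEAD
 * algorithms the SQN high bits written above land inside the authenticated
 * region (ctp.auth.length already accounts for sqh_len), so they are
 * covered by the ICV computation and stripped again afterwards by
 * outb_sqh_process().
 */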
673 * setup/update packets and crypto ops for ESP outbound tunnel case.
676 outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
677 struct rte_crypto_op *cop[], uint16_t num)
683 struct rte_ipsec_sa *sa;
684 struct rte_cryptodev_sym_session *cs;
685 union sym_op_data icv;
686 uint64_t iv[IPSEC_MAX_IV_QWORD];
693 sqn = esn_outb_update_sqn(sa, &n);
695 rte_errno = EOVERFLOW;
698 for (i = 0; i != n; i++) {
700 sqc = rte_cpu_to_be_64(sqn + i);
703 /* try to update the packet itself */
704 rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
706 /* success, setup crypto op */
708 outb_pkt_xprepare(sa, sqc, &icv);
709 lksd_none_cop_prepare(cop[k], cs, mb[i]);
710 esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
712 /* failure, put packet into the death-row */
719	/* move mbufs that were not prepared beyond the good ones */
720 if (k != n && k != 0)
721 move_bad_mbufs(mb, dr, n, n - k);
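/*
 * Usage sketch (added for illustration, not part of the original file):
 * lookaside-none outbound datapath built on top of the prepare handler
 * above, using the public wrappers from rte_ipsec.h. dev_id, qid and the
 * function name are hypothetical.
 */
static uint16_t
example_lksd_none_outb_enqueue(const struct rte_ipsec_session *ss,
	uint8_t dev_id, uint16_t qid, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	uint16_t k;

	/* fill packets and crypto ops (ends up in outb_tun_prepare() above) */
	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);

	/* hand the prepared crypto ops over to the crypto PMD */
	return rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
}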
727 * setup/update packet data and metadata for ESP outbound transport case.
729 static inline int32_t
730 esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
731 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
732 uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
735 uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
737 struct esp_hdr *esph;
738 struct esp_tail *espt;
742 uhlen = l2len + l3len;
743 plen = mb->pkt_len - uhlen;
745 /* calculate extra header space required */
746 hlen = sa->iv_len + sizeof(*esph);
748 /* number of bytes to encrypt */
749 clen = plen + sizeof(*espt);
750 clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
752 /* pad length + esp tail */
754 tlen = pdlen + sa->icv_len;
756 /* do append and insert */
757 ml = rte_pktmbuf_lastseg(mb);
758 if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
761 /* prepend space for ESP header */
762 ph = rte_pktmbuf_prepend(mb, hlen);
767 pdofs = ml->data_len;
768 ml->data_len += tlen;
770 pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
772 /* shift L2/L3 headers */
773 insert_esph(ph, ph + hlen, uhlen);
775 /* update ip header fields */
776 np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
779 /* update spi, seqn and iv */
780 esph = (struct esp_hdr *)(ph + uhlen);
781 iv = (uint64_t *)(esph + 1);
782 copy_iv(iv, ivp, sa->iv_len);
785 esph->seq = sqn_low32(sqc);
788 pdofs += pdlen + sa->sqh_len;
791 pdlen -= sizeof(*espt);
793 /* copy padding data */
794 rte_memcpy(pt, esp_pad_bytes, pdlen);
796 /* update esp trailer */
797 espt = (struct esp_tail *)(pt + pdlen);
798 espt->pad_len = pdlen;
799 espt->next_proto = np;
801 icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
802 icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
808 * setup/update packets and crypto ops for ESP outbound transport case.
811 outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
812 struct rte_crypto_op *cop[], uint16_t num)
815 uint32_t i, k, n, l2, l3;
818 struct rte_ipsec_sa *sa;
819 struct rte_cryptodev_sym_session *cs;
820 union sym_op_data icv;
821 uint64_t iv[IPSEC_MAX_IV_QWORD];
828 sqn = esn_outb_update_sqn(sa, &n);
830 rte_errno = EOVERFLOW;
833 for (i = 0; i != n; i++) {
838 sqc = rte_cpu_to_be_64(sqn + i);
841 /* try to update the packet itself */
842 rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
845 /* success, setup crypto op */
847 outb_pkt_xprepare(sa, sqc, &icv);
848 lksd_none_cop_prepare(cop[k], cs, mb[i]);
849 esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
851 /* failure, put packet into the death-row */
858	/* move mbufs that were not prepared beyond the good ones */
859 if (k != n && k != 0)
860 move_bad_mbufs(mb, dr, n, n - k);
866 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
868 static inline int32_t
869 esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
870 const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
871 const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
873 struct rte_crypto_sym_op *sop;
874 struct aead_gcm_iv *gcm;
875 struct aesctr_cnt_blk *ctr;
878 uint8_t algo_type = sa->algo_type;
880 clen = plen - sa->ctp.cipher.length;
881 if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
884 /* fill sym op fields */
888 case ALGO_TYPE_AES_GCM:
889 sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
890 sop->aead.data.length = clen;
891 sop->aead.digest.data = icv->va;
892 sop->aead.digest.phys_addr = icv->pa;
893 sop->aead.aad.data = icv->va + sa->icv_len;
894 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
896 /* fill AAD IV (located inside crypto op) */
897 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
899 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
900 pofs + sizeof(struct esp_hdr));
901 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
903 case ALGO_TYPE_AES_CBC:
904 case ALGO_TYPE_3DES_CBC:
905 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
906 sop->cipher.data.length = clen;
907 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
908 sop->auth.data.length = plen - sa->ctp.auth.length;
909 sop->auth.digest.data = icv->va;
910 sop->auth.digest.phys_addr = icv->pa;
912 /* copy iv from the input packet to the cop */
913 ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
914 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
915 pofs + sizeof(struct esp_hdr));
916 copy_iv(ivc, ivp, sa->iv_len);
918 case ALGO_TYPE_AES_CTR:
919 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
920 sop->cipher.data.length = clen;
921 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
922 sop->auth.data.length = plen - sa->ctp.auth.length;
923 sop->auth.digest.data = icv->va;
924 sop->auth.digest.phys_addr = icv->pa;
926 /* copy iv from the input packet to the cop */
927 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
929 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
930 pofs + sizeof(struct esp_hdr));
931 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
934 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
935 sop->cipher.data.length = clen;
936 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
937 sop->auth.data.length = plen - sa->ctp.auth.length;
938 sop->auth.digest.data = icv->va;
939 sop->auth.digest.phys_addr = icv->pa;
950	 * for pure cryptodev (lookaside none), depending on SA settings,
951 * we might have to write some extra data to the packet.
954 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
955 const union sym_op_data *icv)
957 struct aead_gcm_aad *aad;
959 /* insert SQN.hi between ESP trailer and ICV */
960 if (sa->sqh_len != 0)
961 insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
964 * fill AAD fields, if any (aad fields are placed after icv),
965 * right now we support only one AEAD algorithm: AES-GCM.
967 if (sa->aad_len != 0) {
968 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
969 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
974 * setup/update packet data and metadata for ESP inbound tunnel case.
976 static inline int32_t
977 esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
978 const struct replay_sqn *rsn, struct rte_mbuf *mb,
979 uint32_t hlen, union sym_op_data *icv)
983 uint32_t icv_ofs, plen;
985 struct esp_hdr *esph;
987 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
990 * retrieve and reconstruct SQN, then check it, then
991 * convert it back into network byte order.
993 sqn = rte_be_to_cpu_32(esph->seq);
995 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
997 rc = esn_inb_check_sqn(rsn, sa, sqn);
1001 sqn = rte_cpu_to_be_64(sqn);
1003 /* start packet manipulation */
1007 ml = rte_pktmbuf_lastseg(mb);
1008 icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
1010 /* we have to allocate space for AAD somewhere,
1011 * right now - just use free trailing space at the last segment.
1012	 * It would probably be more convenient to reserve space for the AAD
1013	 * inside rte_crypto_op itself
1014	 * (as is already done for the IV, whose space is reserved inside the cop).
1016 if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
1019 icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
1020 icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
1022 inb_pkt_xprepare(sa, sqn, icv);
1027 * setup/update packets and crypto ops for ESP inbound case.
1030 inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1031 struct rte_crypto_op *cop[], uint16_t num)
1035 struct rte_ipsec_sa *sa;
1036 struct rte_cryptodev_sym_session *cs;
1037 struct replay_sqn *rsn;
1038 union sym_op_data icv;
1042 cs = ss->crypto.ses;
1043 rsn = rsn_acquire(sa);
1046 for (i = 0; i != num; i++) {
1048 hl = mb[i]->l2_len + mb[i]->l3_len;
1049 rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
1051 lksd_none_cop_prepare(cop[k], cs, mb[i]);
1052 rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,
1063 rsn_release(sa, rsn);
1065	/* move mbufs that were not prepared beyond the good ones */
1066 if (k != num && k != 0)
1067 move_bad_mbufs(mb, dr, num, num - k);
1073 * setup crypto ops for LOOKASIDE_PROTO type of devices.
1076 lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
1077 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
1080 struct rte_crypto_sym_op *sop;
1082 for (i = 0; i != num; i++) {
1084 cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1085 cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1086 cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
1088 __rte_security_attach_session(sop, ss->security.ses);
1093 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
1094 * Note that for LOOKASIDE_PROTO all packet modifications will be
1095 * performed by PMD/HW.
1096	 * SW only has to prepare the crypto op.
1099 lksd_proto_prepare(const struct rte_ipsec_session *ss,
1100 struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
1102 lksd_proto_cop_prepare(ss, mb, cop, num);
1107 * process ESP inbound tunnel packet.
1110 esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
1113 uint32_t hlen, icv_len, tlen;
1114 struct esp_hdr *esph;
1115 struct esp_tail *espt;
1116 struct rte_mbuf *ml;
1119 if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
1122 icv_len = sa->icv_len;
1124 ml = rte_pktmbuf_lastseg(mb);
1125 espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
1126 ml->data_len - icv_len - sizeof(*espt));
1129 * check padding and next proto.
1130 * return an error if something is wrong.
1132 pd = (char *)espt - espt->pad_len;
1133 if (espt->next_proto != sa->proto ||
1134 memcmp(pd, esp_pad_bytes, espt->pad_len))
1137	/* cut off ICV, ESP tail and padding bytes */
1138 tlen = icv_len + sizeof(*espt) + espt->pad_len;
1139 ml->data_len -= tlen;
1140 mb->pkt_len -= tlen;
1142	/* cut off L2/L3 headers, ESP header and IV */
1143 hlen = mb->l2_len + mb->l3_len;
1144 esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
1145 rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
1147 /* retrieve SQN for later check */
1148 *sqn = rte_be_to_cpu_32(esph->seq);
1150	/* reset mbuf metadata: L2/L3 len, packet type */
1151 mb->packet_type = RTE_PTYPE_UNKNOWN;
1152 mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
1155 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
1156 mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
1161 * process ESP inbound transport packet.
1164 esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
1167 uint32_t hlen, icv_len, l2len, l3len, tlen;
1168 struct esp_hdr *esph;
1169 struct esp_tail *espt;
1170 struct rte_mbuf *ml;
1173 if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
1176 icv_len = sa->icv_len;
1178 ml = rte_pktmbuf_lastseg(mb);
1179 espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
1180 ml->data_len - icv_len - sizeof(*espt));
1182 /* check padding, return an error if something is wrong. */
1183 pd = (char *)espt - espt->pad_len;
1184 if (memcmp(pd, esp_pad_bytes, espt->pad_len))
1187	/* cut off ICV, ESP tail and padding bytes */
1188 tlen = icv_len + sizeof(*espt) + espt->pad_len;
1189 ml->data_len -= tlen;
1190 mb->pkt_len -= tlen;
1192 /* retrieve SQN for later check */
1195 hlen = l2len + l3len;
1196 op = rte_pktmbuf_mtod(mb, char *);
1197 esph = (struct esp_hdr *)(op + hlen);
1198 *sqn = rte_be_to_cpu_32(esph->seq);
1200 /* cut off ESP header and IV, update L3 header */
1201 np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
1202 remove_esph(np, op, hlen);
1203 update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
1206 /* reset mbuf packet type */
1207 mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
1209 /* clear the PKT_RX_SEC_OFFLOAD flag if set */
1210 mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
1215	 * for a group of ESP inbound packets, perform SQN check and update.
1217 static inline uint16_t
1218 esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
1219 uint32_t dr[], uint16_t num)
1222 struct replay_sqn *rsn;
1224 rsn = rsn_update_start(sa);
1227 for (i = 0; i != num; i++) {
1228 if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
1234 rsn_update_finish(sa, rsn);
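/*
 * Note (added for clarity, not part of the original file): inbound
 * anti-replay is split into two steps - the SQN is only checked against
 * the window during prepare (esn_inb_check_sqn()), while the window itself
 * is advanced here, after the crypto device has authenticated the packets,
 * so that forged packets cannot move the window forward.
 */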
1239 * process group of ESP inbound tunnel packets.
1242 inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1246 struct rte_ipsec_sa *sa;
1252 /* process packets, extract seq numbers */
1255 for (i = 0; i != num; i++) {
1257 if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
1259	/* bad packet, will be dropped from further processing */
1264 /* handle unprocessed mbufs */
1265 if (k != num && k != 0)
1266 move_bad_mbufs(mb, dr, num, num - k);
1268	/* update SQN and replay window */
1269 n = esp_inb_rsn_update(sa, sqn, dr, k);
1271 /* handle mbufs with wrong SQN */
1272 if (n != k && n != 0)
1273 move_bad_mbufs(mb, dr, k, k - n);
1276 rte_errno = EBADMSG;
1282 * process group of ESP inbound transport packets.
1285 inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1290 struct rte_ipsec_sa *sa;
1295 /* process packets, extract seq numbers */
1298 for (i = 0; i != num; i++) {
1300 if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
1302	/* bad packet, will be dropped from further processing */
1307 /* handle unprocessed mbufs */
1308 if (k != num && k != 0)
1309 move_bad_mbufs(mb, dr, num, num - k);
1311	/* update SQN and replay window */
1312 n = esp_inb_rsn_update(sa, sqn, dr, k);
1314 /* handle mbufs with wrong SQN */
1315 if (n != k && n != 0)
1316 move_bad_mbufs(mb, dr, k, k - n);
1319 rte_errno = EBADMSG;
1325 * process outbound packets for SA with ESN support,
1326	 * for algorithms that require SQN.hibits to be implicitly included
1327	 * in the digest computation.
1328 * In that case we have to move ICV bytes back to their proper place.
1331 outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1334 uint32_t i, k, icv_len, *icv;
1335 struct rte_mbuf *ml;
1336 struct rte_ipsec_sa *sa;
1342 icv_len = sa->icv_len;
1344 for (i = 0; i != num; i++) {
1345 if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
1346 ml = rte_pktmbuf_lastseg(mb[i]);
1347 icv = rte_pktmbuf_mtod_offset(ml, void *,
1348 ml->data_len - icv_len);
1349 remove_sqh(icv, icv_len);
1355 /* handle unprocessed mbufs */
1357 rte_errno = EBADMSG;
1359 move_bad_mbufs(mb, dr, num, num - k);
1366 * simplest pkt process routine:
1367 * all actual processing is already done by HW/PMD,
1368 * just check mbuf ol_flags.
1370 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
1371 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
1372 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
1375 pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
1384 for (i = 0; i != num; i++) {
1385 if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
1391 /* handle unprocessed mbufs */
1393 rte_errno = EBADMSG;
1395 move_bad_mbufs(mb, dr, num, num - k);
1402 * prepare packets for inline ipsec processing:
1403 * set ol_flags and attach metadata.
1406 inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
1407 struct rte_mbuf *mb[], uint16_t num)
1409 uint32_t i, ol_flags;
1411 ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
1412 for (i = 0; i != num; i++) {
1414 mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
1416 rte_security_set_pkt_metadata(ss->security.ctx,
1417 ss->security.ses, mb[i], NULL);
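/*
 * Note (added for clarity, not part of the original file):
 * rte_security_set_pkt_metadata() is only invoked when the security context
 * advertises RTE_SECURITY_TX_OLOAD_NEED_MDATA; otherwise setting
 * PKT_TX_SEC_OFFLOAD in ol_flags is sufficient for the inline device.
 */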
1422 * process group of ESP outbound tunnel packets destined for
1423 * INLINE_CRYPTO type of device.
1426 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
1427 struct rte_mbuf *mb[], uint16_t num)
1433 struct rte_ipsec_sa *sa;
1434 union sym_op_data icv;
1435 uint64_t iv[IPSEC_MAX_IV_QWORD];
1441 sqn = esn_outb_update_sqn(sa, &n);
1443 rte_errno = EOVERFLOW;
1446 for (i = 0; i != n; i++) {
1448 sqc = rte_cpu_to_be_64(sqn + i);
1451 /* try to update the packet itself */
1452 rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
1456 /* failure, put packet into the death-row */
1463	/* move mbufs that were not processed beyond the good ones */
1464 if (k != n && k != 0)
1465 move_bad_mbufs(mb, dr, n, n - k);
1467 inline_outb_mbuf_prepare(ss, mb, k);
1472 * process group of ESP outbound transport packets destined for
1473 * INLINE_CRYPTO type of device.
1476 inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
1477 struct rte_mbuf *mb[], uint16_t num)
1480 uint32_t i, k, n, l2, l3;
1483 struct rte_ipsec_sa *sa;
1484 union sym_op_data icv;
1485 uint64_t iv[IPSEC_MAX_IV_QWORD];
1491 sqn = esn_outb_update_sqn(sa, &n);
1493 rte_errno = EOVERFLOW;
1496 for (i = 0; i != n; i++) {
1501 sqc = rte_cpu_to_be_64(sqn + i);
1504 /* try to update the packet itself */
1505 rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
1510 /* failure, put packet into the death-row */
1517	/* move mbufs that were not processed beyond the good ones */
1518 if (k != n && k != 0)
1519 move_bad_mbufs(mb, dr, n, n - k);
1521 inline_outb_mbuf_prepare(ss, mb, k);
1526 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1527 * actual processing is done by HW/PMD, just set flags and metadata.
1530 outb_inline_proto_process(const struct rte_ipsec_session *ss,
1531 struct rte_mbuf *mb[], uint16_t num)
1533 inline_outb_mbuf_prepare(ss, mb, num);
1538 * Select packet processing function for session on LOOKASIDE_NONE
1542 lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
1543 struct rte_ipsec_sa_pkt_func *pf)
1547 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
1548 RTE_IPSEC_SATP_MODE_MASK;
1551 switch (sa->type & msk) {
1552 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
1553 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
1554 pf->prepare = inb_pkt_prepare;
1555 pf->process = inb_tun_pkt_process;
1557 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
1558 pf->prepare = inb_pkt_prepare;
1559 pf->process = inb_trs_pkt_process;
1561 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
1562 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
1563 pf->prepare = outb_tun_prepare;
1564 pf->process = (sa->sqh_len != 0) ?
1565 outb_sqh_process : pkt_flag_process;
1567 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
1568 pf->prepare = outb_trs_prepare;
1569 pf->process = (sa->sqh_len != 0) ?
1570 outb_sqh_process : pkt_flag_process;
1580 * Select packet processing function for session on INLINE_CRYPTO
1584 inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
1585 struct rte_ipsec_sa_pkt_func *pf)
1589 static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
1590 RTE_IPSEC_SATP_MODE_MASK;
1593 switch (sa->type & msk) {
1594 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
1595 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
1596 pf->process = inb_tun_pkt_process;
1598 case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
1599 pf->process = inb_trs_pkt_process;
1601 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
1602 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
1603 pf->process = inline_outb_tun_pkt_process;
1605 case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
1606 pf->process = inline_outb_trs_pkt_process;
1616 * Select packet processing function for given session based on SA parameters
1617	 * and the type of device associated with the session.
1620 ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
1621 const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
1626 pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
1629 case RTE_SECURITY_ACTION_TYPE_NONE:
1630 rc = lksd_none_pkt_func_select(sa, pf);
1632 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1633 rc = inline_crypto_pkt_func_select(sa, pf);
1635 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1636 if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
1637 RTE_IPSEC_SATP_DIR_IB)
1638 pf->process = pkt_flag_process;
1640 pf->process = outb_inline_proto_process;
1642 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
1643 pf->prepare = lksd_proto_prepare;
1644 pf->process = pkt_flag_process;
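/*
 * Usage sketch (added for illustration, not part of the original file):
 * completion side of the lookaside-none datapath, running the process
 * handler selected above via the public wrapper from rte_ipsec.h.
 * dev_id, qid and the function name are hypothetical.
 */
static uint16_t
example_lksd_none_complete(const struct rte_ipsec_session *ss,
	uint8_t dev_id, uint16_t qid, struct rte_crypto_op *cop[],
	struct rte_mbuf *mb[], uint16_t num)
{
	uint16_t i, n;

	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, num);
	for (i = 0; i != n; i++)
		mb[i] = cop[i]->sym->m_src;

	/* runs the pf->process handler chosen by ipsec_sa_pkt_func_select() */
	return rte_ipsec_pkt_process(ss, mb, n);
}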