/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <rte_cryptodev.h>

#include "ipsec_sqn.h"

#define MBUF_MAX_L2_LEN	RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
#define MBUF_MAX_L3_LEN	RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)

/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};
/*
 * helper routine, fills internal crypto_xform structure.
 */
static int
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	xfn = xf->next;
	/* for AEAD just one xform is required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xfn != NULL)
			return -EINVAL;
		xform->aead = &xf->aead;
	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;
	} else {

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}

	return 0;
}
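/*
 * Illustrative sketch (not part of the original file): a caller building
 * the crypto xform chain for an outbound SA would link CIPHER->AUTH, as
 * fill_crypto_xform() above expects. The algorithm choices and ops below
 * are placeholder assumptions.
 */
static void
example_outb_cipher_auth_chain(struct rte_crypto_sym_xform xf[2])
{
	memset(xf, 0, 2 * sizeof(xf[0]));

	/* first xform: cipher (AES-CBC assumed for illustration) */
	xf[0].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xf[0].cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xf[0].cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xf[0].next = &xf[1];

	/* second xform: auth (HMAC-SHA1 assumed for illustration) */
	xf[1].type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xf[1].auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	xf[1].auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xf[1].next = NULL;
}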
uint64_t __rte_experimental
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
	return sa->type;
}

static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	uint32_t n, sz, wsz;

	wsz = *wnd_sz;
	n = 0;
	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we have a window of at least 64 when ESN is
		 * enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
		if (wsz != 0)
			n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	*wnd_sz = wsz;
	*nb_bucket = n;

	sz = rsn_size(n);
	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
	return sz;
}
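/*
 * Resulting memory layout (sketch derived from the code above):
 *   struct rte_ipsec_sa | replay window #0 | replay window #1 (SQN_ATOM only)
 * i.e. sz == sizeof(struct rte_ipsec_sa) +
 *      rsn_size(nb_bucket) * (SQN_ATOM ? REPLAY_SQN_NUM : 1).
 */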
void __rte_experimental
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}
/*
 * Determine expected SA type based on input parameters.
 */
static int
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	uint64_t tp;

	tp = 0;

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;
	else
		return -EINVAL;
	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;
	else
		return -EINVAL;
	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
		else
			return -EINVAL;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else
		return -EINVAL;
	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;

	*type = tp;
	return 0;
}
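/*
 * Illustrative sketch: for an outbound ESP IPv4 tunnel SA carrying IPv4
 * traffic, with ESN disabled and no flags set, the code above yields:
 *   RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_OB |
 *   RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
 *   RTE_IPSEC_SATP_ESN_DISABLE | RTE_IPSEC_SATP_SQN_RAW
 */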
/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may differ when support for new algorithms is added */
	sa->ctp.auth.offset = 0;
	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
	sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
}
/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}
/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
{
	uint8_t algo_type;

	sa->sqn.outb.raw = 1;

	/* these params may differ when support for new algorithms is added */
	sa->ctp.auth.offset = hlen;
	sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;

	algo_type = sa->algo_type;

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CTR:
	case ALGO_TYPE_NULL:
		sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
		break;
	}
}
/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;

	/* update l2_len and l3_len fields for outbound mbuf */
	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);

	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);

	esp_outb_init(sa, sa->hdr_len);
}
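/*
 * Illustrative sketch (hypothetical helper, placeholder values): filling
 * the prm->tun fields consumed by esp_outb_tun_init() above, for an
 * IPv4-in-IPv4 tunnel whose pre-built outer header has no L2 part.
 * Assumes the struct ipv4_hdr definition from <rte_ip.h>.
 */
static void
example_fill_tun_prm(struct rte_ipsec_sa_prm *prm,
	const struct ipv4_hdr *outer)
{
	prm->tun.hdr = outer;			/* pre-built outer header */
	prm->tun.hdr_len = sizeof(*outer);
	prm->tun.hdr_l3_off = 0;		/* outer L3 starts at offset 0 */
	prm->tun.next_proto = IPPROTO_IPIP;	/* IPv4 inner packet */
}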
/*
 * helper function, init SA structure.
 */
static int
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK;

	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			/* RFC 4106 */
			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;
			break;
		default:
			return -EINVAL;
		}
	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;
		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->iv_len = 0;
			sa->algo_type = ALGO_TYPE_NULL;
			break;

		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			/* RFC 3686 */
			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;
			break;

		case RTE_CRYPTO_CIPHER_3DES_CBC:
			/* RFC 2451 */
			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;
			break;

		default:
			return -EINVAL;
		}
	}
	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	/* preserve all values except l2_len and l3_len */
	sa->tx_offload.msk =
		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
				0, 0, 0, 0, 0);

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0);
		break;
	}

	return 0;
}
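/*
 * Per-algorithm parameters set by esp_sa_init() above (sketch):
 *   NULL:      pad_align = IPSEC_PAD_NULL,     iv_len = 0
 *   AES-CBC:   pad_align = IPSEC_PAD_AES_CBC,  iv_len = IPSEC_MAX_IV_SIZE
 *   AES-CTR:   pad_align = IPSEC_PAD_AES_CTR,  iv_len = IPSEC_AES_CTR_IV_SIZE
 *   3DES-CBC:  pad_align = IPSEC_PAD_3DES_CBC, iv_len = IPSEC_3DES_IV_SIZE
 *   AES-GCM:   pad_align = IPSEC_PAD_AES_GCM,  iv_len = sizeof(uint64_t)
 */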
/*
 * helper function, init SA replay structure.
 */
static void
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
int __rte_experimental
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	uint64_t type;
	uint32_t nb, wsz;
	int32_t rc;

	if (prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}
int __rte_experimental
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	int32_t rc, sz;
	uint32_t nb, wsz;
	uint64_t type;
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			prm->tun.hdr_len > sizeof(sa->hdr))
		return -EINVAL;

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	memset(sa, 0, sz);
	sa->type = type;
	sa->size = sz;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0)
		rte_ipsec_sa_fini(sa);

	/* fill replay window related fields */
	if (nb != 0)
		fill_sa_replay(sa, wsz, nb);

	return sz;
}
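/*
 * Usage sketch (illustrative, assumes <rte_malloc.h>; error handling
 * trimmed): query the required size, allocate, then init the SA.
 */
static struct rte_ipsec_sa *
example_sa_create(const struct rte_ipsec_sa_prm *prm, int32_t socket)
{
	int32_t sz;
	struct rte_ipsec_sa *sa;

	sz = rte_ipsec_sa_size(prm);
	if (sz < 0)
		return NULL;

	sa = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE, socket);
	if (sa != NULL && rte_ipsec_sa_init(sa, prm, sz) != sz) {
		rte_free(sa);
		sa = NULL;
	}
	return sa;
}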
static inline void
mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[],
	uint32_t num)
{
	uint32_t i;

	for (i = 0; i != num; i++)
		dst[i] = src[i];
}
/*
 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
 */
static inline void
lksd_none_cop_prepare(struct rte_crypto_op *cop,
	struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
{
	struct rte_crypto_sym_op *sop;

	sop = cop->sym;
	cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	sop->m_src = mb;
	__rte_crypto_sym_op_attach_sym_session(sop, cs);
}
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
esp_outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint8_t algo_type = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo_type) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
		sop->aead.data.length = sa->ctp.cipher.length + plen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
		sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
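/*
 * Resulting outbound tunnel packet layout (sketch):
 *   outer hdr | esp hdr | IV | payload | padding | esp tail | [SQN.hi] | ICV
 * SQN.hi is present only when the high sequence number bits have to be
 * authenticated (sa->sqh_len != 0); it is removed again by
 * outb_sqh_process() after the crypto operation completes.
 */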
/*
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *aad;
	uint8_t algo_type = sa->algo_type;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (algo_type == ALGO_TYPE_AES_GCM) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
static uint16_t
outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	/* copy bad (unprepared) mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}
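/*
 * Usage sketch for the lookaside-none path (illustrative, hypothetical
 * helper; dev_id/qp and error handling are assumptions, and the
 * rte_ipsec_pkt_crypto_prepare() call assumes <rte_ipsec.h>): prepare
 * crypto ops for a burst of packets, then enqueue them to a cryptodev.
 */
static uint16_t
example_lksd_none_enqueue(struct rte_ipsec_session *ss, uint8_t dev_id,
	uint16_t qp, struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
	uint16_t num)
{
	uint16_t k;

	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
	return rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
}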
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
			IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
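/*
 * Resulting outbound transport packet layout (sketch):
 *   L2 | L3 | esp hdr | IV | payload | padding | esp tail | [SQN.hi] | ICV
 * with the original L2/L3 headers shifted forward by insert_esph() to
 * make room for the ESP header and IV.
 */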
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
static uint16_t
outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
				l2, l3, &icv);

		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	/* copy bad (unprepared) mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}
/*
 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
 */
static inline int32_t
esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint64_t *ivc, *ivp;
	uint32_t clen;
	uint8_t algo_type = sa->algo_type;

	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
		return -EINVAL;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
		sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
		sop->aead.data.length = clen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		copy_iv(ivc, ivp, sa->iv_len);
		break;
	case ALGO_TYPE_AES_CTR:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_NULL:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * for pure cryptodev (lookaside none), depending on SA settings
 * we might have to write some extra data to the packet.
 */
static inline void
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (aad fields are placed after icv),
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
	}
}
/*
 * setup/update packet data and metadata for ESP inbound tunnel case.
 */
static inline int32_t
esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
	const struct replay_sqn *rsn, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)
{
	int32_t rc;
	uint64_t sqn;
	uint32_t icv_ofs, plen;
	struct rte_mbuf *ml;
	struct esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct SQN, then check it, then
	 * convert it back into network byte order.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

	rc = esn_inb_check_sqn(rsn, sa, sqn);
	if (rc != 0)
		return rc;

	sqn = rte_cpu_to_be_64(sqn);

	/* start packet manipulation */
	plen = mb->pkt_len;
	plen = plen - hlen;

	ml = rte_pktmbuf_lastseg(mb);
	icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;

	/* we have to allocate space for AAD somewhere,
	 * right now - just use free trailing space at the last segment.
	 * Would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (again for IV space is already reserved inside cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	inb_pkt_xprepare(sa, sqn, icv);
	return plen;
}
/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
static uint16_t
inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, hl;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_mbuf *dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;
	rsn = rsn_acquire(sa);

	k = 0;
	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0) {
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,
				hl, rc);
		}

		if (rc == 0)
			k++;
		else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	rsn_release(sa, rsn);

	/* copy bad (unprepared) mbufs beyond the good ones */
	if (k != num && k != 0)
		mbuf_bulk_copy(mb + k, dr, num - k);

	return k;
}
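/*
 * Inbound usage sketch (illustrative, hypothetical helper; assumes
 * <rte_ipsec.h>): once the crypto ops prepared above have completed on
 * the cryptodev, the packets are handed back through
 * rte_ipsec_pkt_process(), which lands in inb_tun_pkt_process() /
 * inb_trs_pkt_process() further below.
 */
static uint16_t
example_inb_complete(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return rte_ipsec_pkt_process(ss, mb, num);
}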
/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
static inline void
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		sop->m_src = mb[i];
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW has only to prepare crypto op.
 */
static uint16_t
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	lksd_proto_cop_prepare(ss, mb, cop, num);
	return num;
}
/*
 * process ESP inbound tunnel packet.
 */
static inline int
esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *sqn)
{
	uint32_t hlen, icv_len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;
	char *pd;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -EBADMSG;

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/*
	 * check padding and next proto.
	 * return an error if something is wrong.
	 */
	pd = (char *)espt - espt->pad_len;
	if (espt->next_proto != sa->proto ||
			memcmp(pd, esp_pad_bytes, espt->pad_len))
		return -EINVAL;

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* cut off L2/L3 headers, ESP header and IV */
	hlen = mb->l2_len + mb->l3_len;
	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
	rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);

	/* retrieve SQN for later check */
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
	return 0;
}
/*
 * process ESP inbound transport packet.
 */
static inline int
esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t *sqn)
{
	uint32_t hlen, icv_len, l2len, l3len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;
	char *np, *op, *pd;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -EBADMSG;

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/* check padding, return an error if something is wrong. */
	pd = (char *)espt - espt->pad_len;
	if (memcmp(pd, esp_pad_bytes, espt->pad_len))
		return -EINVAL;

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* retrieve SQN for later check */
	l2len = mb->l2_len;
	l3len = mb->l3_len;
	hlen = l2len + l3len;
	op = rte_pktmbuf_mtod(mb, char *);
	esph = (struct esp_hdr *)(op + hlen);
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* cut off ESP header and IV, update L3 header */
	np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
	remove_esph(np, op, hlen);
	update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
			espt->next_proto);

	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
	return 0;
}
/*
 * for group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num)
{
	uint32_t i, k;
	struct replay_sqn *rsn;

	rsn = rsn_update_start(sa);

	k = 0;
	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
			mb[k++] = mb[i];
		else
			dr[i - k] = mb[i];
	}

	rsn_update_finish(sa, rsn);
	return k;
}
/*
 * process group of ESP inbound tunnel packets.
 */
static uint16_t
inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k;
	struct rte_ipsec_sa *sa;
	uint32_t sqn[num];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	/* process packets, extract seq numbers */

	k = 0;
	for (i = 0; i != num; i++) {
		if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
			mb[k++] = mb[i];
		/* bad packet, will drop from further processing */
		else
			dr[i - k] = mb[i];
	}

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}
/*
 * process group of ESP inbound transport packets.
 */
static uint16_t
inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k;
	uint32_t sqn[num];
	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	/* process packets, extract seq numbers */

	k = 0;
	for (i = 0; i != num; i++) {
		if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
			mb[k++] = mb[i];
		/* bad packet, will drop from further processing */
		else
			dr[i - k] = mb[i];
	}

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hi bits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
static uint16_t
outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			mb[k++] = mb[i];
		} else
			dr[i - k] = mb[i];
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}
/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD,
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
static uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k;
	struct rte_mbuf *dr[num];

	RTE_SET_USED(ss);

	k = 0;
	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
			mb[k++] = mb[i];
		else
			dr[i - k] = mb[i];
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			mbuf_bulk_copy(mb + k, dr, num - k);
	}

	return k;
}
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
}
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
static uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, update mbuf fields */
		if (rc >= 0)
			mb[k++] = mb[i];
		/* failure, put packet into the death-row */
		else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy bad (unprocessed) mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
static uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
				l2, l3, &icv);

		/* success, update mbuf fields */
		if (rc >= 0)
			mb[k++] = mb[i];
		/* failure, put packet into the death-row */
		else {
			dr[i - k] = mb[i];
			rte_errno = -rc;
		}
	}

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy bad (unprocessed) mbufs beyond the good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

	return k;
}
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
static uint16_t
outb_inline_proto_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}
/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
static int
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
static int
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
/*
 * Select packet processing function for the given session, based on SA
 * parameters and the type of device associated with the session.
 */
int
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	rc = 0;
	pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };

	switch (ss->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = outb_inline_proto_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare = lksd_proto_prepare;
		pf->process = pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
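/*
 * Summary of the dispatch above (sketch):
 *   session type         prepare                 process
 *   NONE                 inb_pkt_prepare /       inb_*_pkt_process /
 *                        outb_*_prepare          outb_sqh_process or
 *                                                pkt_flag_process
 *   INLINE_CRYPTO        none                    inb_*_pkt_process /
 *                                                inline_outb_*_pkt_process
 *   INLINE_PROTOCOL      none                    pkt_flag_process (inbound) /
 *                                                outb_inline_proto_process
 *   LOOKASIDE_PROTOCOL   lksd_proto_prepare      pkt_flag_process
 */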