/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_cryptodev.h>

#include "ipsec_sqn.h"

#define MBUF_MAX_L2_LEN	RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
#define MBUF_MAX_L3_LEN	RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)

/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};

/*
 * helper routine, fills internal crypto_xform structure.
 */
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)

	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	/* for AEAD just one xform required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {

		xform->aead = &xf->aead;

	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
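
/*
 * Illustrative caller-side sketch (not part of the original file):
 * how an application would chain xforms to satisfy the strict
 * ordering rule above. The algorithm choices are arbitrary
 * assumptions, picked only for illustration.
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *	};
 *	struct rte_crypto_sym_xform ciph_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	inbound SA:  auth_xf.next = &ciph_xf; prm.crypto_xform = &auth_xf;
 *	outbound SA: ciph_xf.next = &auth_xf; prm.crypto_xform = &ciph_xf;
 */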
uint64_t __rte_experimental
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)

ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)

	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we have at least a 64-entry window when ESN
		 * is enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);

		n = replay_num_bucket(wsz);

		if (n > WINDOW_BUCKET_MAX)

	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
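
/*
 * Worked example (sketch) for the sizing logic above: with ESN enabled
 * and a caller-supplied replay window of 0, the window is bumped to
 * WINDOW_BUCKET_SIZE (the RFC 4303 recommended minimum of 64), while a
 * caller-supplied window of 128 is kept as-is. With
 * RTE_IPSEC_SATP_SQN_ATOM set, the replay structure is duplicated
 * (REPLAY_SQN_NUM copies), so the total allocation comes out to
 * sizeof(struct rte_ipsec_sa) + REPLAY_SQN_NUM * rsn_size(nb_bucket).
 */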
void __rte_experimental
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)

	memset(sa, 0, sa->size);

/*
 * Determine expected SA type based on input parameters.
 */
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;

	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;
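
/*
 * Example (sketch) of the bits composed above: an outbound ESP tunnel
 * SA with an IPv4 outer header, IPv4 inner traffic, ESN disabled and
 * no SQN_ATOM flag yields:
 *	RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_OB |
 *	RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4 |
 *	RTE_IPSEC_SATP_ESN_DISABLE | RTE_IPSEC_SATP_SQN_RAW
 */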
/*
 * Init ESP inbound specific things.
 */
esp_inb_init(struct rte_ipsec_sa *sa)

	/* these params may differ with new algorithms support */
	sa->ctp.auth.offset = 0;
	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
	sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;

/*
 * Init ESP inbound tunnel specific things.
 */
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)

	sa->proto = prm->tun.next_proto;

/*
 * Init ESP outbound specific things.
 */
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)

	sa->sqn.outb.raw = 1;

	/* these params may differ with new algorithms support */
	sa->ctp.auth.offset = hlen;
	sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;

	algo_type = sa->algo_type;

	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CTR:

		sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
		sa->ctp.cipher.length = 0;

	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;

/*
 * Init ESP outbound tunnel specific things.
 */
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)

	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;

	/* update l2_len and l3_len fields for outbound mbuf */
	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);

	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);

	esp_outb_init(sa, sa->hdr_len);

/*
 * helper function, init SA structure.
 */
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
		RTE_IPSEC_SATP_MODE_MASK;

	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:

			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;

		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;
		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->algo_type = ALGO_TYPE_NULL;

		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;

		case RTE_CRYPTO_CIPHER_AES_CTR:

			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;

		case RTE_CRYPTO_CIPHER_3DES_CBC:

			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;

	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	/* preserve all values except l2_len and l3_len */

		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);

	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):

	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);

	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0);

/*
 * helper function, init SA replay structure.
 */
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)

	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
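
/*
 * Memory layout implied by the code above (sketch): the replay
 * window(s) live directly behind the SA in the same allocation.
 *
 *	+--------------------------+ <- sa
 *	| struct rte_ipsec_sa      |
 *	+--------------------------+ <- sa + 1 == sqn.inb.rsn[0]
 *	| replay_sqn window #0     |
 *	+--------------------------+ <- rsn[0] + rsn_size(nb_bucket)
 *	| replay_sqn window #1     |    (RTE_IPSEC_SATP_SQN_ATOM only)
 *	+--------------------------+
 */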
int __rte_experimental
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)

	/* determine SA type */
	rc = fill_sa_type(prm, &type);

	/* determine required size */
	wsz = prm->replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);

int __rte_experimental
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,

	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)

	/* determine SA type */
	rc = fill_sa_type(prm, &type);

	/* determine required size */
	wsz = prm->replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);

	else if (size < (uint32_t)sz)

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			prm->tun.hdr_len > sizeof(sa->hdr))

	rc = fill_crypto_xform(&cxf, type, prm);

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0)
		rte_ipsec_sa_fini(sa);

	/* fill replay window related fields */
	fill_sa_replay(sa, wsz, nb);
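
/*
 * Usage sketch (assumption, not part of the original file): typical
 * SA creation sequence with the two API calls above; prm contents and
 * error handling are left to the caller.
 *
 *	struct rte_ipsec_sa *sa;
 *	int32_t sz;
 *
 *	sz = rte_ipsec_sa_size(&prm);
 *	if (sz < 0)
 *		return sz;
 *	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	if (sa == NULL)
 *		return -ENOMEM;
 *	sz = rte_ipsec_sa_init(sa, &prm, sz);
 */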
mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[],

	for (i = 0; i != num; i++)

/*
 * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
 */
lksd_none_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)

	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {

		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

		__rte_crypto_sym_op_attach_sym_session(sop, ss->crypto.ses);
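
/*
 * Caller-side sketch (assumption, not part of the original file):
 * crypto ops come from a caller-managed op mempool (cop_pool, dev_id
 * and qp_id are hypothetical) and are submitted to a cryptodev after
 * preparation.
 *
 *	struct rte_crypto_op *cop[BURST];
 *	uint16_t n;
 *
 *	n = rte_crypto_op_bulk_alloc(cop_pool,
 *		RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop, num);
 *	n = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n);
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, n);
 */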
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
esp_outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)

	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;
	uint8_t algo_type = sa->algo_type;

	/* fill sym op fields */

	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */

		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
		sop->aead.data.length = sa->ctp.cipher.length + plen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,

		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);

	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
		sop->cipher.data.length = sa->ctp.cipher.length + plen;
		sop->auth.data.offset = sa->ctp.auth.offset + hlen;
		sop->auth.data.length = sa->ctp.auth.length + plen;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,

		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv)

	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;

	struct esp_hdr *esph;
	struct esp_tail *espt;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */

	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))

	ph = rte_pktmbuf_prepend(mb, hlen - l2len);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
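
/*
 * Worked example (sketch) for the length math above, assuming AES-GCM
 * with pad_align == 4 and a 16-byte ICV, and plen == 100
 * (the concrete values are illustrative assumptions):
 *	clen  = RTE_ALIGN_CEIL(100 + sizeof(struct esp_tail), 4) = 104
 *	pdlen = clen - plen = 4 (2 pad bytes + 2-byte ESP tail)
 *	tlen  = pdlen + icv_len = 20 bytes appended to the last segment
 */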
/*
 * for pure cryptodev (lookaside none), depending on SA settings,
 * we might have to write some extra data to the packet.
 */
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)

	struct aead_gcm_aad *aad;
	uint8_t algo_type = sa->algo_type;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);

	/*
	 * fill IV and AAD fields, if any (AAD fields are placed after ICV);
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (algo_type == ALGO_TYPE_AES_GCM) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);

		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, setup crypto op */

			outb_pkt_xprepare(sa, sqc, &icv);
			esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);

		/* failure, put packet into the death-row */

	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);
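
/*
 * Note on the pattern above (shared by the prepare/process routines in
 * this file): on return, the first k mbufs in mb[] are the successfully
 * prepared ones and the remaining num - k are the "death-row" packets.
 * A caller sketch (assumption):
 *
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, num);
 *	for (i = k; i != num; i++)
 *		rte_pktmbuf_free(mb[i]);
 */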
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, union sym_op_data *icv)

	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;

	struct esp_hdr *esph;
	struct esp_tail *espt;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);

	pdofs = ml->data_len;
	ml->data_len += tlen;

	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,

	/* update spi, seqn and iv */
	esph = (struct esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->seq = sqn_low32(sqc);

	pdofs += pdlen + sa->sqh_len;

	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	uint32_t i, k, n, l2, l3;

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);

		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],

		/* success, setup crypto op */

			outb_pkt_xprepare(sa, sqc, &icv);
			esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);

		/* failure, put packet into the death-row */

	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);
/*
 * setup crypto op and crypto sym op for ESP inbound tunnel packet.
 */
static inline int32_t
esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	const union sym_op_data *icv, uint32_t pofs, uint32_t plen)

	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aesctr_cnt_blk *ctr;

	uint8_t algo_type = sa->algo_type;

	clen = plen - sa->ctp.cipher.length;
	if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)

	/* fill sym op fields */

	case ALGO_TYPE_AES_GCM:
		sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
		sop->aead.data.length = clen;
		sop->aead.digest.data = icv->va;
		sop->aead.digest.phys_addr = icv->pa;
		sop->aead.aad.data = icv->va + sa->icv_len;
		sop->aead.aad.phys_addr = icv->pa + sa->icv_len;

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,

		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);

	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		copy_iv(ivc, ivp, sa->iv_len);

	case ALGO_TYPE_AES_CTR:
		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;

		/* copy iv from the input packet to the cop */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,

		ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
			pofs + sizeof(struct esp_hdr));
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);

		sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
		sop->cipher.data.length = clen;
		sop->auth.data.offset = pofs + sa->ctp.auth.offset;
		sop->auth.data.length = plen - sa->ctp.auth.length;
		sop->auth.digest.data = icv->va;
		sop->auth.digest.phys_addr = icv->pa;
/*
 * for pure cryptodev (lookaside none), depending on SA settings,
 * we might have to write some extra data to the packet.
 */
inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)

	struct aead_gcm_aad *aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0)
		insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);

	/*
	 * fill AAD fields, if any (AAD fields are placed after ICV);
	 * right now we support only one AEAD algorithm: AES-GCM.
	 */
	if (sa->aad_len != 0) {
		aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));

/*
 * setup/update packet data and metadata for ESP inbound tunnel case.
 */
static inline int32_t
esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
	const struct replay_sqn *rsn, struct rte_mbuf *mb,
	uint32_t hlen, union sym_op_data *icv)

	uint32_t icv_ofs, plen;

	struct esp_hdr *esph;

	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);

	/*
	 * retrieve and reconstruct SQN, then check it, then
	 * convert it back into network byte order.
	 */
	sqn = rte_be_to_cpu_32(esph->seq);

	sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

	rc = esn_inb_check_sqn(rsn, sa, sqn);

	sqn = rte_cpu_to_be_64(sqn);

	/* start packet manipulation */

	ml = rte_pktmbuf_lastseg(mb);
	icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;

	/*
	 * we have to allocate space for AAD somewhere,
	 * right now - just use free trailing space at the last segment.
	 * Would probably be more convenient to reserve space for AAD
	 * inside rte_crypto_op itself
	 * (again, for IV the space is already reserved inside the cop).
	 */
	if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))

	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);

	inb_pkt_xprepare(sa, sqn, icv);
/*
 * setup/update packets and crypto ops for ESP inbound case.
 */
inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)

	struct rte_ipsec_sa *sa;
	struct replay_sqn *rsn;
	union sym_op_data icv;
	struct rte_mbuf *dr[num];

	rsn = rsn_acquire(sa);

	for (i = 0; i != num; i++) {

		hl = mb[i]->l2_len + mb[i]->l3_len;
		rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
		if (rc >= 0)
			rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,

	rsn_release(sa, rsn);

	lksd_none_cop_prepare(ss, mb, cop, k);

	/* copy not prepared mbufs beyond good ones */
	if (k != num && k != 0)
		mbuf_bulk_copy(mb + k, dr, num - k);
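
/*
 * End-to-end lookaside-none sketch (assumption, not part of the
 * original file): once the cryptodev finishes, the packets still have
 * to go through rte_ipsec_pkt_process() to be checked and stripped.
 * dev_id and qp_id are hypothetical.
 *
 *	n = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, num);
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, n);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, RTE_DIM(cop));
 *	gather mbufs back from cop[i]->sym->m_src, then:
 *	n = rte_ipsec_pkt_process(&ss, mb, n);
 */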
/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)

	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {

		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;

		__rte_security_attach_session(sop, ss->security.ses);

/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW has only to prepare crypto op.
 */
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)

	lksd_proto_cop_prepare(ss, mb, cop, num);
/*
 * process ESP inbound tunnel packet.
 */
esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,

	uint32_t hlen, icv_len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/*
	 * check padding and next proto.
	 * return an error if something is wrong.
	 */
	pd = (char *)espt - espt->pad_len;
	if (espt->next_proto != sa->proto ||
			memcmp(pd, esp_pad_bytes, espt->pad_len))

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* cut off L2/L3 headers, ESP header and IV */
	hlen = mb->l2_len + mb->l3_len;
	esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
	rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);

	/* retrieve SQN for later check */
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* reset mbuf metadata: L2/L3 len, packet type */
	mb->packet_type = RTE_PTYPE_UNKNOWN;
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
/*
 * process ESP inbound transport packet.
 */
esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,

	uint32_t hlen, icv_len, l2len, l3len, tlen;
	struct esp_hdr *esph;
	struct esp_tail *espt;
	struct rte_mbuf *ml;

	if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)

	icv_len = sa->icv_len;

	ml = rte_pktmbuf_lastseg(mb);
	espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
		ml->data_len - icv_len - sizeof(*espt));

	/* check padding, return an error if something is wrong. */
	pd = (char *)espt - espt->pad_len;
	if (memcmp(pd, esp_pad_bytes, espt->pad_len))

	/* cut off ICV, ESP tail and padding bytes */
	tlen = icv_len + sizeof(*espt) + espt->pad_len;
	ml->data_len -= tlen;
	mb->pkt_len -= tlen;

	/* retrieve SQN for later check */

	hlen = l2len + l3len;
	op = rte_pktmbuf_mtod(mb, char *);
	esph = (struct esp_hdr *)(op + hlen);
	*sqn = rte_be_to_cpu_32(esph->seq);

	/* cut off ESP header and IV, update L3 header */
	np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
	remove_esph(np, op, hlen);
	update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,

	/* reset mbuf packet type */
	mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
	mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
/*
 * for group of ESP inbound packets perform SQN check and update.
 */
static inline uint16_t
esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
	struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num)

	struct replay_sqn *rsn;

	rsn = rsn_update_start(sa);

	for (i = 0; i != num; i++) {
		if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)

	rsn_update_finish(sa, rsn);
/*
 * process group of ESP inbound tunnel packets.
 */
inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	struct rte_ipsec_sa *sa;

	struct rte_mbuf *dr[num];

	/* process packets, extract seq numbers */

	for (i = 0; i != num; i++) {

		if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)

		/* bad packet, will be dropped from further processing */

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */

		rte_errno = EBADMSG;

		mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * process group of ESP inbound transport packets.
 */
inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	/* process packets, extract seq numbers */

	for (i = 0; i != num; i++) {

		if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)

		/* bad packet, will be dropped from further processing */

	/* update seq # and replay window */
	k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k);

	/* handle unprocessed mbufs */

		rte_errno = EBADMSG;

		mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	struct rte_mbuf *dr[num];

	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);

	/* handle unprocessed mbufs */

		rte_errno = EBADMSG;

		mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD,
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],

	struct rte_mbuf *dr[num];

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)

	/* handle unprocessed mbufs */

		rte_errno = EBADMSG;

		mbuf_bulk_copy(mb + k, dr, num - k);
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;

			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
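
/*
 * Inline-crypto caller sketch (assumption, not part of the original
 * file): prepared packets go straight to the NIC TX queue and are
 * encrypted by the ethdev. port_id and queue_id are hypothetical.
 *
 *	k = rte_ipsec_pkt_process(&ss, mb, num);
 *	k = rte_eth_tx_burst(port_id, queue_id, mb, k);
 */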
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);

		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);

		/* success, update mbuf fields */

		/* failure, put packet into the death-row */

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	uint32_t i, k, n, l2, l3;

	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct rte_mbuf *dr[num];

	sqn = esn_outb_update_sqn(sa, &n);

		rte_errno = EOVERFLOW;

	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);

		/* try to update the packet itself */
		rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],

		/* success, update mbuf fields */

		/* failure, put packet into the death-row */

	inline_outb_mbuf_prepare(ss, mb, k);

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		mbuf_bulk_copy(mb + k, dr, n - k);

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
outb_inline_proto_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)

	inline_outb_mbuf_prepare(ss, mb, num);
/*
 * Select packet processing function for session on LOOKASIDE_NONE
 * type of device.
 */
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
		RTE_IPSEC_SATP_MODE_MASK;

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_tun_pkt_process;

	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = inb_pkt_prepare;
		pf->process = inb_trs_pkt_process;

	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare = outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;

	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare = outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			outb_sqh_process : pkt_flag_process;

/*
 * Select packet processing function for session on INLINE_CRYPTO
 * type of device.
 */
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
	struct rte_ipsec_sa_pkt_func *pf)

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
		RTE_IPSEC_SATP_MODE_MASK;

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inb_tun_pkt_process;

	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inb_trs_pkt_process;

	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;

	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
/*
 * Select packet processing function for a given session, based on SA
 * parameters and the type of device associated with the session.
 */
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)

	pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };

	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);

	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);

	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = outb_inline_proto_process;

	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare = lksd_proto_prepare;
		pf->process = pkt_flag_process;