1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2020 Intel Corporation
8 #include <rte_cryptodev.h>
11 #include "ipsec_sqn.h"
17 typedef uint16_t (*esp_inb_process_t)(struct rte_ipsec_sa *sa,
18 struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
22 * helper function to fill crypto_sym op for cipher+auth algorithms.
23 * used by inb_cop_prepare(), see below.
26 sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
27 const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
28 uint32_t pofs, uint32_t plen)
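/*
 * sa->ctp holds per-SA offset/length adjustments (pre-computed when the SA
 * is initialized) that translate the ESP header offset (pofs) and ESP
 * packet length (plen) into the cipher and auth data regions of the crypto op.
 */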
30 sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
31 sop->cipher.data.length = plen - sa->ctp.cipher.length;
32 sop->auth.data.offset = pofs + sa->ctp.auth.offset;
33 sop->auth.data.length = plen - sa->ctp.auth.length;
34 sop->auth.digest.data = icv->va;
35 sop->auth.digest.phys_addr = icv->pa;
39 * helper function to fill crypto_sym op for AEAD algorithms,
40 * used by inb_cop_prepare(), see below.
43 sop_aead_prepare(struct rte_crypto_sym_op *sop,
44 const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
45 uint32_t pofs, uint32_t plen)
47 sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
48 sop->aead.data.length = plen - sa->ctp.cipher.length;
49 sop->aead.digest.data = icv->va;
50 sop->aead.digest.phys_addr = icv->pa;
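/* AAD (if used) was written right after the ICV by inb_pkt_xprepare() */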
51 sop->aead.aad.data = icv->va + sa->icv_len;
52 sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
56 * setup crypto op and crypto sym op for ESP inbound packet.
59 inb_cop_prepare(struct rte_crypto_op *cop,
60 const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
61 const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
63 struct rte_crypto_sym_op *sop;
64 struct aead_gcm_iv *gcm;
65 struct aead_ccm_iv *ccm;
66 struct aead_chacha20_poly1305_iv *chacha20_poly1305;
67 struct aesctr_cnt_blk *ctr;
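/* the IV (if the algorithm uses one) immediately follows the fixed-size ESP header */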
72 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
73 pofs + sizeof(struct rte_esp_hdr));
75 /* fill sym op fields */
79 case ALGO_TYPE_AES_GCM:
80 sop_aead_prepare(sop, sa, icv, pofs, plen);
82 /* fill IV (located inside crypto op) */
83 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
85 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
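/* the GCM nonce is the 4-byte SA salt followed by the 8-byte IV taken from the packet (RFC 4106) */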
87 case ALGO_TYPE_AES_CCM:
88 sop_aead_prepare(sop, sa, icv, pofs, plen);
90 /* fill IV (located inside crypto op) */
91 ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
93 aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
95 case ALGO_TYPE_CHACHA20_POLY1305:
96 sop_aead_prepare(sop, sa, icv, pofs, plen);
98 /* fill IV (located inside crypto op) */
99 chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
100 struct aead_chacha20_poly1305_iv *,
102 aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
105 case ALGO_TYPE_AES_CBC:
106 case ALGO_TYPE_3DES_CBC:
107 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
109 /* copy iv from the input packet to the cop */
110 ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
111 copy_iv(ivc, ivp, sa->iv_len);
113 case ALGO_TYPE_AES_GMAC:
114 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
116 /* fill IV (located inside crypto op) */
117 gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
119 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
121 case ALGO_TYPE_AES_CTR:
122 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
124 /* fill CTR block (located inside crypto op) */
125 ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
127 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
130 sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
135 static inline uint32_t
136 inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
137 uint32_t *pofs, uint32_t plen, void *iv)
139 struct aead_gcm_iv *gcm;
140 struct aead_ccm_iv *ccm;
141 struct aead_chacha20_poly1305_iv *chacha20_poly1305;
142 struct aesctr_cnt_blk *ctr;
146 ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
147 *pofs + sizeof(struct rte_esp_hdr));
150 switch (sa->algo_type) {
151 case ALGO_TYPE_AES_GCM:
152 case ALGO_TYPE_AES_GMAC:
153 gcm = (struct aead_gcm_iv *)iv;
154 aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
156 case ALGO_TYPE_AES_CCM:
157 ccm = (struct aead_ccm_iv *)iv;
158 aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
160 case ALGO_TYPE_CHACHA20_POLY1305:
161 chacha20_poly1305 = (struct aead_chacha20_poly1305_iv *)iv;
162 aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
165 case ALGO_TYPE_AES_CBC:
166 case ALGO_TYPE_3DES_CBC:
167 copy_iv(iv, ivp, sa->iv_len);
169 case ALGO_TYPE_AES_CTR:
170 ctr = (struct aesctr_cnt_blk *)iv;
171 aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
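/*
 * the region handed to CPU crypto covers the authenticated data;
 * sa->cofs (used later by cpu_crypto_bulk()) gives the relative
 * cipher offsets within that region.
 */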
175 *pofs += sa->ctp.auth.offset;
176 clen = plen - sa->ctp.auth.length;
181 * Helper function for prepare() to deal with the situation when the ICV
182 * is spread across two segments. Tries to move the ICV completely into the last segment.
185 static struct rte_mbuf *
186 move_icv(struct rte_mbuf *ml, uint32_t ofs)
194 n = ml->data_len - ofs;
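/* n = number of ICV bytes that sit at the tail of this segment and must be moved into the next one */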
196 prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
197 new = rte_pktmbuf_prepend(ms, n);
201 /* move n ICV bytes from ml into ms */
202 rte_memcpy(new, prev, n);
209 * for pure cryptodev (lookaside none) depending on SA settings,
210 * we might have to write some extra data to the packet.
213 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
214 const union sym_op_data *icv)
216 struct aead_gcm_aad *aad;
217 struct aead_ccm_aad *caad;
218 struct aead_chacha20_poly1305_aad *chacha_aad;
220 /* insert SQN.hi between ESP trailer and ICV */
221 if (sa->sqh_len != 0)
222 insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
225 * fill AAD fields, if any (the AAD is placed right after the ICV),
226 * supported AEAD algorithms: AES-GCM, AES-CCM and CHACHA20-POLY1305.
228 switch (sa->algo_type) {
229 case ALGO_TYPE_AES_GCM:
230 if (sa->aad_len != 0) {
231 aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
232 aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
235 case ALGO_TYPE_AES_CCM:
236 if (sa->aad_len != 0) {
237 caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
238 aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
241 case ALGO_TYPE_CHACHA20_POLY1305:
242 if (sa->aad_len != 0) {
243 chacha_aad = (struct aead_chacha20_poly1305_aad *)
244 (icv->va + sa->icv_len);
245 aead_chacha20_poly1305_aad_fill(chacha_aad,
246 sa->spi, sqc, IS_ESN(sa));
253 inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
254 struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
258 struct rte_esp_hdr *esph;
260 esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
263 * retrieve and reconstruct SQN, then check it, then
264 * convert it back into network byte order.
266 sqn = rte_be_to_cpu_32(esph->seq);
268 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
269 *sqc = rte_cpu_to_be_64(sqn);
271 /* check IPsec window */
272 rc = esn_inb_check_sqn(rsn, sa, sqn);
277 /* prepare packet for upcoming processing */
278 static inline int32_t
279 inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
280 uint32_t hlen, union sym_op_data *icv)
282 uint32_t clen, icv_len, icv_ofs, plen;
285 /* start packet manipulation */
289 /* check that packet has a valid length */
290 clen = plen - sa->ctp.cipher.length;
291 if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
294 /* find ICV location */
295 icv_len = sa->icv_len;
296 icv_ofs = mb->pkt_len - icv_len;
298 ml = mbuf_get_seg_ofs(mb, &icv_ofs);
301 * if the ICV is spread across two segments, then try to
302 * move the ICV completely into the last segment.
304 if (ml->data_len < icv_ofs + icv_len) {
306 ml = move_icv(ml, icv_ofs);
310 /* new ICV location */
314 icv_ofs += sa->sqh_len;
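/*
 * with ESN the ICV will be shifted up by sqh_len bytes so that SQN.hi
 * can be inserted in front of it; icv_ofs refers to that final location.
 */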
317 * we have to allocate space for the AAD somewhere,
318 * for now just use the free trailing space of the last segment.
319 * It would probably be more convenient to reserve space for the AAD
320 * inside rte_crypto_op itself
321 * (space for the IV is already reserved inside the cop).
323 if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
326 icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
327 icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
330 * if ESN is used then the high-order 32 bits are also used in ICV
331 * calculation, but are not transmitted. Update the packet length
332 * to be consistent with the auth data length and offset; this extra
333 * length is subtracted from the packet length again in post-crypto processing.
335 mb->pkt_len += sa->sqh_len;
336 ml->data_len += sa->sqh_len;
341 static inline int32_t
342 inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
343 struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
348 rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
352 rc = inb_prepare(sa, mb, hlen, icv);
356 inb_pkt_xprepare(sa, sqn, icv);
361 * setup/update packets and crypto ops for ESP inbound case.
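 * These routines back rte_ipsec_pkt_crypto_prepare() for pure cryptodev
 * (lookaside none) sessions; a typical inbound flow (sketch) is:
 *   rte_ipsec_pkt_crypto_prepare() -> rte_cryptodev_enqueue_burst() ->
 *   rte_cryptodev_dequeue_burst() -> rte_ipsec_pkt_process().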
364 esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
365 struct rte_crypto_op *cop[], uint16_t num)
369 struct rte_ipsec_sa *sa;
370 struct rte_cryptodev_sym_session *cs;
371 struct replay_sqn *rsn;
372 union sym_op_data icv;
377 rsn = rsn_acquire(sa);
380 for (i = 0; i != num; i++) {
382 hl = mb[i]->l2_len + mb[i]->l3_len;
383 rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
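/* on success rc holds the length of the ESP part of the packet and is passed to inb_cop_prepare() as plen */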
385 lksd_none_cop_prepare(cop[k], cs, mb[i]);
386 inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
394 rsn_release(sa, rsn);
396 /* move the bad (unprepared) mbufs beyond the good ones */
397 if (k != num && k != 0)
398 move_bad_mbufs(mb, dr, num, num - k);
404 * Start processing of an inbound packet.
405 * This is the common part for both tunnel and transport modes.
406 * Extract information that will be needed later from mbuf metadata and
407 * actual packet data:
408 * - mbuf for packet's last segment
409 * - length of the L2/L3 headers
410 * - esp tail structure
413 process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
414 struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
416 const struct rte_esp_tail *pt;
419 ofs = mb->pkt_len - tlen;
420 hlen[0] = mb->l2_len + mb->l3_len;
421 ml[0] = mbuf_get_seg_ofs(mb, &ofs);
422 pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
428 * Helper function to check pad byte values.
429 * Note that pad bytes can be spread across multiple segments.
432 check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
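/* compare the padding against the standard ESP self-describing pad pattern (1, 2, 3, ...) while walking the segment chain */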
437 for (n = 0; n != len; n += k, mb = mb->next) {
438 k = mb->data_len - ofs;
439 k = RTE_MIN(k, len - n);
440 pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
441 if (memcmp(pd, esp_pad_bytes + n, k) != 0)
450 * packet checks for transport mode:
451 * - no reported IPsec related failures in ol_flags
452 * - tail and header lengths are valid
453 * - padding bytes are valid
454 * Apart from the checks, the function also updates the tail offset (and
455 * segment), taking the pad length into account.
457 static inline int32_t
458 trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
459 uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
461 if ((mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) != 0 ||
462 tlen + hlen > mb->pkt_len)
465 /* padding starts in a previous segment, find the segment/offset where it begins */
466 if (tofs[0] < espt.pad_len) {
467 tofs[0] = mb->pkt_len - tlen;
468 ml[0] = mbuf_get_seg_ofs(mb, tofs);
470 tofs[0] -= espt.pad_len;
472 return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
476 * packet checks for tunnel mode:
477 * - same as for transport mode
478 * - ESP tail next_proto matches the value expected for that SA
480 static inline int32_t
481 tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
482 uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
485 return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
486 espt.next_proto != proto);
490 * step two for tunnel mode:
491 * - read SQN value (for future use)
492 * - cut off ICV, ESP tail and padding bytes
493 * - cut off ESP header and IV and, if needed, the L2/L3 headers
494 * (controlled by *adj* value)
497 tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
498 uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
500 const struct rte_esp_hdr *ph;
503 ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
506 /* cut off ICV, ESP tail and padding bytes */
507 mbuf_cut_seg_ofs(mb, ml, tofs, tlen);
509 /* cut off L2/L3 headers, ESP header and IV */
510 return rte_pktmbuf_adj(mb, adj);
514 * step two for transport mode:
515 * - read SQN value (for future use)
516 * - cut off ICV, ESP tail and padding bytes
517 * - cut off ESP header and IV
518 * - move L2/L3 header to fill the gap after ESP header removal
521 trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
522 uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
526 /* get start of the packet before modifications */
527 op = rte_pktmbuf_mtod(mb, char *);
529 /* cut off ESP header and IV */
530 np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);
532 /* move header bytes to fill the gap after ESP header removal */
533 remove_esph(np, op, hlen);
538 * step three for transport mode:
539 * update mbuf metadata:
544 trs_process_step3(struct rte_mbuf *mb)
546 /* reset mbuf packet type */
547 mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
549 /* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
550 mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
554 * step three for tunnel mode:
555 * update mbuf metadata:
561 tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
563 /* reset mbuf metadata: L2/L3 len, packet type */
564 mb->packet_type = RTE_PTYPE_UNKNOWN;
565 mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
567 /* clear the RTE_MBUF_F_RX_SEC_OFFLOAD flag if set */
568 mb->ol_flags &= ~RTE_MBUF_F_RX_SEC_OFFLOAD;
572 * *process* function for tunnel packets
574 static inline uint16_t
575 tun_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
576 uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
578 uint32_t adj, i, k, tl, bytes;
579 uint32_t hl[num], to[num];
580 struct rte_esp_tail espt[num];
581 struct rte_mbuf *ml[num];
586 * remove icv, esp trailer and high-order
587 * 32 bits of esn from packet length
589 const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
590 const uint32_t cofs = sa->ctp.cipher.offset;
593 * to minimize stalls due to load latency,
594 * read mbufs metadata and esp tail first.
596 for (i = 0; i != num; i++)
597 process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
601 for (i = 0; i != num; i++) {
604 tl = tlen + espt[i].pad_len;
606 /* check that packet is valid */
607 if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
610 outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
613 /* modify packet's layout */
614 inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
617 /* update inner ip header */
618 update_tun_inb_l3hdr(sa, outh, inh);
620 /* update mbuf's metadata */
621 tun_process_step3(mb[i], sa->tx_offload.msk,
624 bytes += mb[i]->pkt_len;
629 sa->statistics.count += k;
630 sa->statistics.bytes += bytes;
635 * *process* function for transport packets
637 static inline uint16_t
638 trs_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
639 uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
642 uint32_t i, k, l2, tl, bytes;
643 uint32_t hl[num], to[num];
644 struct rte_esp_tail espt[num];
645 struct rte_mbuf *ml[num];
648 * remove icv, esp trailer and high-order
649 * 32 bits of esn from packet length
651 const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
652 const uint32_t cofs = sa->ctp.cipher.offset;
655 * to minimize stalls due to load latency,
656 * read mbufs metadata and esp tail first.
658 for (i = 0; i != num; i++)
659 process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
663 for (i = 0; i != num; i++) {
665 tl = tlen + espt[i].pad_len;
668 /* check that packet is valid */
669 if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
670 hl[i] + cofs, tl) == 0) {
672 /* modify packet's layout */
673 np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
675 update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
676 l2, hl[i] - l2, espt[i].next_proto);
678 /* update mbuf's metadata */
679 trs_process_step3(mb[i]);
681 bytes += mb[i]->pkt_len;
686 sa->statistics.count += k;
687 sa->statistics.bytes += bytes;
692 * for a group of ESP inbound packets, perform SQN check and update.
694 static inline uint16_t
695 esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
696 uint32_t dr[], uint16_t num)
699 struct replay_sqn *rsn;
701 /* replay not enabled */
702 if (sa->replay.win_sz == 0)
705 rsn = rsn_update_start(sa);
708 for (i = 0; i != num; i++) {
709 if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
715 rsn_update_finish(sa, rsn);
720 * process group of ESP inbound packets.
722 static inline uint16_t
723 esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
724 uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
730 /* process packets, extract seq numbers */
731 k = process(sa, mb, sqn, dr, num, sqh_len);
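/* sqn[] now holds the sequence numbers of the k successfully processed packets; dr[] holds the indexes of the failed ones */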
733 /* handle unprocessed mbufs */
734 if (k != num && k != 0)
735 move_bad_mbufs(mb, dr, num, num - k);
737 /* update SQN and replay window */
738 n = esp_inb_rsn_update(sa, sqn, dr, k);
740 /* handle mbufs with wrong SQN */
741 if (n != k && n != 0)
742 move_bad_mbufs(mb, dr, k, k - n);
751 * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
752 * (synchronous mode).
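 * Used for CPU-CRYPTO sessions: crypto/auth transforms are executed
 * synchronously on the CPU, with no cryptodev enqueue/dequeue round-trip.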
755 cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
756 struct rte_mbuf *mb[], uint16_t num)
760 struct rte_ipsec_sa *sa;
761 struct replay_sqn *rsn;
762 union sym_op_data icv;
763 struct rte_crypto_va_iova_ptr iv[num];
764 struct rte_crypto_va_iova_ptr aad[num];
765 struct rte_crypto_va_iova_ptr dgst[num];
769 uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
774 rsn = rsn_acquire(sa);
776 /* do preparation for all packets */
777 for (i = 0, k = 0; i != num; i++) {
779 /* calculate ESP header offset */
780 l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
782 /* prepare ESP packet for processing */
783 rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
785 /* get encrypted data offset and length */
786 clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
787 l4ofs + k, rc, ivbuf[k]);
789 /* fill iv, digest and aad */
791 aad[k].va = icv.va + sa->icv_len;
792 dgst[k++].va = icv.va;
799 /* release rsn lock */
800 rsn_release(sa, rsn);
802 /* move the bad (unprepared) mbufs beyond the good ones */
803 if (k != num && k != 0)
804 move_bad_mbufs(mb, dr, num, num - k);
806 /* convert mbufs to iovecs and do actual crypto/auth processing */
808 cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
814 * process group of ESP inbound tunnel packets.
817 esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
818 struct rte_mbuf *mb[], uint16_t num)
820 struct rte_ipsec_sa *sa = ss->sa;
822 return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
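/*
 * process group of ESP inbound tunnel packets for the inline case.
 * sqh_len is 0 here: with inline processing no ESN high-order bytes were
 * inserted into the packet, so there is nothing extra to strip.
 */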
826 inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
827 struct rte_mbuf *mb[], uint16_t num)
829 return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
833 * process group of ESP inbound transport packets.
836 esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
837 struct rte_mbuf *mb[], uint16_t num)
839 struct rte_ipsec_sa *sa = ss->sa;
841 return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
845 inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
846 struct rte_mbuf *mb[], uint16_t num)
848 return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);