/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);
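/*
 * Both outb_tun_pkt_prepare() and outb_trs_pkt_prepare() below implement
 * this prototype, which lets the common cpu-crypto path
 * (cpu_outb_pkt_prepare()) be parameterized with either of them.
 */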
/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}
/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}
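/*
 * Resulting mbuf tail layout after the pkt_prepare functions below have
 * run (sketch):
 *
 *	... payload | padding | ESP tail | [SQN.hi] | ICV | [AAD]
 *	                                             ^ icv->va/pa
 *
 * i.e. when the device needs extra AAD space it sits right behind the
 * ICV, which is why aad.data/aad.phys_addr point icv_len bytes past icv.
 */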
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
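/*
 * Note: all offsets and lengths in sa->ctp are precomputed at SA setup
 * time (see sa.c); outb_cop_prepare() only adds the per-packet header
 * offset and payload length on top of them.
 */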
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* We don't need to pad/align packet or append ICV length
		 * when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
			sa->hdr_len + sizeof(struct rte_udp_hdr));
	}

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
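/*
 * Worked example for the arithmetic above (illustrative numbers):
 * plen = 100, pad_align = 4, icv_len = 16, sqh_len = 0, no TSO:
 *	clen  = 100 + 2 (ESP tail) = 102, aligned up to 104
 *	pdlen = 104 - 100          = 4  (2 pad bytes + 2 tail bytes)
 *	tlen  = 4 + 16 + 0         = 20 bytes appended to the mbuf
 */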
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill AAD fields, if any (AAD is placed right after the ICV),
	 * for the AEAD algorithms: AES-GCM, AES-CCM, CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	}
}
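/*
 * Note: the SQN.hi word written above is an input to ICV computation
 * only and must never appear on the wire; esp_outb_sqh_process() below
 * moves the ICV back over it once the crypto device completes.
 */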
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
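/*
 * Illustrative lookaside-none call flow (sketch only; dev_id/qid and the
 * crypto-op array are application-managed, error handling omitted):
 *
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, num);
 *	ng = rte_ipsec_pkt_crypto_group(cop, mb, grp, n);
 *
 * followed by rte_ipsec_pkt_process() on each returned group.
 */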
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* We don't need to pad/align packet or append ICV length
		 * when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
		l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
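/*
 * Header manipulation performed above, as a sketch:
 *
 *	before: | L2 | L3 | L4 payload |
 *	after:  | L2 | L3 | ESP | IV | L4 payload | pad | ESP tail | ICV |
 *
 * insert_esph() moves the original L2/L3 headers forward into the
 * prepended headroom so the ESP header and IV can be written behind them.
 */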
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}
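/*
 * Used by cpu_outb_pkt_prepare() below: the synchronous cpu-crypto path
 * describes each packet with a single auth (offset, length) pair, with
 * the cipher offsets relative to it taken from the precomputed sa->cofs,
 * so no per-packet crypto op needs to be built here.
 */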
static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num,
	esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}
uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}
/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}
	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}
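/*
 * Sketch of the tail rewrite performed above for ESN SAs:
 *
 *	from crypto dev: ... | ESP tail | SQN.hi | ICV |
 *	on the wire:     ... | ESP tail | ICV |
 *
 * remove_sqh() shifts the ICV down over the 4-byte SQN.hi, and the
 * pkt_len/data_len adjustments drop those bytes from the packet.
 */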
/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	bytes = 0;
	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}
static inline uint32_t
esn_outb_nb_segments(struct rte_mbuf *m)
{
	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
		uint16_t pkt_l3len = m->pkt_len - m->l2_len;
		uint16_t segments =
			(m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
			(pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
		return segments;
	}
	return 1; /* no TSO */
}
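/*
 * Example (illustrative numbers): a TSO packet with pkt_l3len = 9000 and
 * tso_segsz = 1400 yields (9000 + 1399) / 1400 = 7 segments, and each
 * segment consumes its own ESP sequence number.
 */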
/* Compute how many packets can be sent before overflow occurs */
static inline uint16_t
esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
{
	uint16_t i;
	uint32_t seg_cnt = 0;

	for (i = 0; i < num && seg_cnt < n_sqn; i++)
		seg_cnt += nb_segs[i];

	return i - 1;
}
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;

	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* there is at least 1 */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
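/*
 * Illustrative follow-up for inline crypto (sketch only; port_id/qid are
 * application-provided):
 *
 *	k = rte_ipsec_pkt_process(ss, mb, num);
 *	sent = rte_eth_tx_burst(port_id, qid, mb, k);
 *
 * rte_ipsec_pkt_process() dispatches to this function for
 * RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO tunnel sessions.
 */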
/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;

	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* there is at least 1 */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}