#include "misc.h"
#include "pad.h"
+typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
+ struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
+ uint8_t sqh_len);
+
+/*
+ * helper function to fill crypto_sym op for cipher+auth algorithms.
+ * used by inb_cop_prepare(), see below.
+ */
+static inline void
+sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->cipher.data.length = plen - sa->ctp.cipher.length;
+ sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+ sop->auth.data.length = plen - sa->ctp.auth.length;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+}
+
+/*
+ * helper function to fill crypto_sym op for aead algorithms.
+ * used by inb_cop_prepare(), see below.
+ */
+static inline void
+sop_aead_prepare(struct rte_crypto_sym_op *sop,
+ const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
+ uint32_t pofs, uint32_t plen)
+{
+ sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->aead.data.length = plen - sa->ctp.cipher.length;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+}
+
/*
- * setup crypto op and crypto sym op for ESP inbound tunnel packet.
+ * setup crypto op and crypto sym op for ESP inbound packet.
*/
static inline void
inb_cop_prepare(struct rte_crypto_op *cop,
uint32_t algo;
algo = sa->algo_type;
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct rte_esp_hdr));
/* fill sym op fields */
sop = cop->sym;
switch (algo) {
case ALGO_TYPE_AES_GCM:
- sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
- sop->aead.data.length = plen - sa->ctp.cipher.length;
- sop->aead.digest.data = icv->va;
- sop->aead.digest.phys_addr = icv->pa;
- sop->aead.aad.data = icv->va + sa->icv_len;
- sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+ sop_aead_prepare(sop, sa, icv, pofs, plen);
/* fill AAD IV (located inside crypto op) */
gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
sa->iv_ofs);
- ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CBC:
case ALGO_TYPE_3DES_CBC:
- sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
- sop->cipher.data.length = plen - sa->ctp.cipher.length;
- sop->auth.data.offset = pofs + sa->ctp.auth.offset;
- sop->auth.data.length = plen - sa->ctp.auth.length;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
/* copy iv from the input packet to the cop */
ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
- ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
copy_iv(ivc, ivp, sa->iv_len);
break;
case ALGO_TYPE_AES_CTR:
- sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
- sop->cipher.data.length = plen - sa->ctp.cipher.length;
- sop->auth.data.offset = pofs + sa->ctp.auth.offset;
- sop->auth.data.length = plen - sa->ctp.auth.length;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
- /* copy iv from the input packet to the cop */
+ /* fill CTR block (located inside crypto op) */
ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
sa->iv_ofs);
- ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
break;
case ALGO_TYPE_NULL:
- sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
- sop->cipher.data.length = plen - sa->ctp.cipher.length;
- sop->auth.data.offset = pofs + sa->ctp.auth.offset;
- sop->auth.data.length = plen - sa->ctp.auth.length;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
+ sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
break;
}
}
+
+/*
+ * Helper function for prepare() to handle the case when the ICV is split
+ * across two segments: tries to move the ICV completely into the last
+ * segment.
+ */
+static struct rte_mbuf *
+move_icv(struct rte_mbuf *ml, uint32_t ofs)
+{
+ uint32_t n;
+ struct rte_mbuf *ms;
+ const void *prev;
+ void *new;
+
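+ /*
+ * ms - segment the ICV should end up in;
+ * n - number of ICV bytes that currently reside in ml
+ */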
+ ms = ml->next;
+ n = ml->data_len - ofs;
+
+ prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
+ new = rte_pktmbuf_prepend(ms, n);
+ if (new == NULL)
+ return NULL;
+
+ /* move n ICV bytes from ml into ms */
+ rte_memcpy(new, prev, n);
+ ml->data_len -= n;
+
+ return ms;
+}
+
/*
* for pure cryptodev (lookaside none) depending on SA settings,
* we might have to write some extra data to the packet.
{
int32_t rc;
uint64_t sqn;
- uint32_t clen, icv_ofs, plen;
+ uint32_t clen, icv_len, icv_ofs, plen;
struct rte_mbuf *ml;
- struct esp_hdr *esph;
+ struct rte_esp_hdr *esph;
- esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+ esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
/*
* retrieve and reconstruct SQN, then check it, then
plen = mb->pkt_len;
plen = plen - hlen;
- ml = rte_pktmbuf_lastseg(mb);
- icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
-
/* check that packet has a valid length */
clen = plen - sa->ctp.cipher.length;
if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
return -EBADMSG;
+ /* find ICV location */
+ icv_len = sa->icv_len;
+ icv_ofs = mb->pkt_len - icv_len;
+
+ ml = mbuf_get_seg_ofs(mb, &icv_ofs);
+
+ /*
+ * if the ICV is split across two segments, then try to
+ * move it completely into the last segment.
+ */
+ if (ml->data_len < icv_ofs + icv_len) {
+
+ ml = move_icv(ml, icv_ofs);
+ if (ml == NULL)
+ return -ENOSPC;
+
+ /* new ICV location */
+ icv_ofs = 0;
+ }
+
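+ /* shift the ICV location to leave room for the ESN high-order bits */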
+ icv_ofs += sa->sqh_len;
+
/* we have to allocate space for AAD somewhere,
* right now - just use free trailing space at the last segment.
* Would probably be more convenient to reserve space for AAD
icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
+ /*
+ * if ESN is used, then the high-order 32 bits are also used in ICV
+ * calculation, but are not transmitted. Update the packet length
+ * to be consistent with the auth data length and offset; this will
+ * be subtracted from the packet length in post-crypto processing.
+ */
+ mb->pkt_len += sa->sqh_len;
+ ml->data_len += sa->sqh_len;
+
inb_pkt_xprepare(sa, sqn, icv);
return plen;
}
}
/*
- * process ESP inbound tunnel packet.
+ * First step of inbound packet processing.
+ * This part is common for both tunnel and transport mode.
+ * Extract information that will be needed later from mbuf metadata and
+ * actual packet data:
+ * - mbuf for packet's last segment
+ * - length of the L2/L3 headers
+ * - esp tail structure
+ */
+static inline void
+process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,
+ struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs)
+{
+ const struct rte_esp_tail *pt;
+ uint32_t ofs;
+
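+ /* the ESP tail starts tlen bytes before the end of the packet */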
+ ofs = mb->pkt_len - tlen;
+ hlen[0] = mb->l2_len + mb->l3_len;
+ ml[0] = mbuf_get_seg_ofs(mb, &ofs);
+ pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs);
+ tofs[0] = ofs;
+ espt[0] = pt[0];
+}
+
+/*
+ * Helper function to check the padding bytes' values.
+ * Note that the padding can be spread across multiple segments.
*/
static inline int
-inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
- uint32_t *sqn)
+check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len)
{
- uint32_t hlen, icv_len, tlen;
- struct esp_hdr *esph;
- struct esp_tail *espt;
- struct rte_mbuf *ml;
- char *pd;
+ const uint8_t *pd;
+ uint32_t k, n;
+
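+ /* walk the segments, comparing pad bytes against the expected pattern */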
+ for (n = 0; n != len; n += k, mb = mb->next) {
+ k = mb->data_len - ofs;
+ k = RTE_MIN(k, len - n);
+ pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs);
+ if (memcmp(pd, esp_pad_bytes + n, k) != 0)
+ break;
+ ofs = 0;
+ }
+
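+ /* return the number of pad bytes left unverified (zero on success) */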
+ return len - n;
+}
- if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+
+/*
+ * packet checks for transport mode:
+ * - no reported IPsec related failures in ol_flags
+ * - tail and header lengths are valid
+ * - padding bytes are valid
+ * Apart from the checks, the function also updates the tail offset
+ * (and segment) to account for the pad length.
+ */
+static inline int32_t
+trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
+ uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen)
+{
+ if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 ||
+ tlen + hlen > mb->pkt_len)
return -EBADMSG;
- icv_len = sa->icv_len;
+ /* padding bytes are spread over multiple segments */
+ if (tofs[0] < espt.pad_len) {
+ tofs[0] = mb->pkt_len - tlen;
+ ml[0] = mbuf_get_seg_ofs(mb, tofs);
+ } else
+ tofs[0] -= espt.pad_len;
- ml = rte_pktmbuf_lastseg(mb);
- espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
- ml->data_len - icv_len - sizeof(*espt));
+ return check_pad_bytes(ml[0], tofs[0], espt.pad_len);
+}
- /*
- * check padding and next proto.
- * return an error if something is wrong.
- */
- pd = (char *)espt - espt->pad_len;
- if (espt->next_proto != sa->proto ||
- memcmp(pd, esp_pad_bytes, espt->pad_len))
- return -EINVAL;
+
+/*
+ * packet checks for tunnel mode:
+ * - same as for transport mode
+ * - the ESP tail's next_proto matches the value expected for that SA
+ */
+static inline int32_t
+tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
+ uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen,
+ uint8_t proto)
+{
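+ /* a non-zero return value means that at least one check failed */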
+ return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) ||
+ espt.next_proto != proto);
+}
+
+/*
+ * step two for tunnel mode:
+ * - read SQN value (for future use)
+ * - cut off ICV, ESP tail and padding bytes
+ * - cut off ESP header and IV and, if needed, the L2/L3 headers
+ * (controlled by the *adj* value)
+ */
+static inline void *
+tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
+ uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
+{
+ const struct rte_esp_hdr *ph;
+
+ /* read SQN value (it is kept in network byte order) */
+ ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
+ sqn[0] = ph->seq;
/* cut off ICV, ESP tail and padding bytes */
- tlen = icv_len + sizeof(*espt) + espt->pad_len;
- ml->data_len -= tlen;
- mb->pkt_len -= tlen;
+ mbuf_cut_seg_ofs(mb, ml, tofs, tlen);
/* cut off L2/L3 headers, ESP header and IV */
- hlen = mb->l2_len + mb->l3_len;
- esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
- rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
+ return rte_pktmbuf_adj(mb, adj);
+}
+
+/*
+ * step two for transport mode:
+ * - read SQN value (for future use)
+ * - cut off ICV, ESP tail and padding bytes
+ * - cut off ESP header and IV
+ * - move L2/L3 header to fill the gap after ESP header removal
+ */
+static inline void *
+trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
+ uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn)
+{
+ char *np, *op;
+
+ /* get start of the packet before modifications */
+ op = rte_pktmbuf_mtod(mb, char *);
+
+ /* read SQN; cut off ICV, ESP tail, ESP header and IV */
+ np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn);
+
+ /* move header bytes to fill the gap after ESP header removal */
+ remove_esph(np, op, hlen);
+ return np;
+}
- /* retrieve SQN for later check */
- *sqn = rte_be_to_cpu_32(esph->seq);
+/*
+ * step three for transport mode:
+ * update mbuf metadata:
+ * - packet_type
+ * - ol_flags
+ */
+static inline void
+trs_process_step3(struct rte_mbuf *mb)
+{
+ /* reset mbuf packet type */
+ mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+}
+
+/*
+ * step three for tunnel mode:
+ * update mbuf metadata:
+ * - packet_type
+ * - ol_flags
+ * - tx_offload
+ */
+static inline void
+tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
+{
/* reset mbuf metadata: L2/L3 len, packet type */
mb->packet_type = RTE_PTYPE_UNKNOWN;
- mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
- sa->tx_offload.val;
+ mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
/* clear the PKT_RX_SEC_OFFLOAD flag if set */
- mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
- return 0;
+ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
}
/*
- * process ESP inbound transport packet.
+ * *process* function for tunnel packets
*/
-static inline int
-inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
- uint32_t *sqn)
+static inline uint16_t
+tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
- uint32_t hlen, icv_len, l2len, l3len, tlen;
- struct esp_hdr *esph;
- struct esp_tail *espt;
- struct rte_mbuf *ml;
- char *np, *op, *pd;
+ uint32_t adj, i, k, tl;
+ uint32_t hl[num], to[num];
+ struct rte_esp_tail espt[num];
+ struct rte_mbuf *ml[num];
+ const void *outh;
+ void *inh;
- if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
- return -EBADMSG;
+ /*
+ * remove icv, esp trailer and high-order
+ * 32 bits of esn from packet length
+ */
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
+ const uint32_t cofs = sa->ctp.cipher.offset;
- icv_len = sa->icv_len;
+ /*
+ * to minimize stalls due to load latency,
+ * read the mbufs' metadata and esp tail first.
+ */
+ for (i = 0; i != num; i++)
+ process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
+
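+ /*
+ * k - number of successfully processed packets;
+ * dr[] - indices of the packets that failed the checks
+ */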
+ k = 0;
+ for (i = 0; i != num; i++) {
- ml = rte_pktmbuf_lastseg(mb);
- espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
- ml->data_len - icv_len - sizeof(*espt));
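+ /* adj - bytes to cut from the head, tl - bytes to cut from the tail */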
+ adj = hl[i] + cofs;
+ tl = tlen + espt[i].pad_len;
- /* check padding, return an error if something is wrong. */
- pd = (char *)espt - espt->pad_len;
- if (memcmp(pd, esp_pad_bytes, espt->pad_len))
- return -EINVAL;
+ /* check that packet is valid */
+ if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl,
+ sa->proto) == 0) {
- /* cut of ICV, ESP tail and padding bytes */
- tlen = icv_len + sizeof(*espt) + espt->pad_len;
- ml->data_len -= tlen;
- mb->pkt_len -= tlen;
-
- /* retrieve SQN for later check */
- l2len = mb->l2_len;
- l3len = mb->l3_len;
- hlen = l2len + l3len;
- op = rte_pktmbuf_mtod(mb, char *);
- esph = (struct esp_hdr *)(op + hlen);
- *sqn = rte_be_to_cpu_32(esph->seq);
+ outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *,
+ mb[i]->l2_len);
- /* cut off ESP header and IV, update L3 header */
- np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
- remove_esph(np, op, hlen);
- update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
- espt->next_proto);
+ /* modify packet's layout */
+ inh = tun_process_step2(mb[i], ml[i], hl[i], adj,
+ to[i], tl, sqn + k);
- /* reset mbuf packet type */
- mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+ /* update inner ip header */
+ update_tun_inb_l3hdr(sa, outh, inh);
- /* clear the PKT_RX_SEC_OFFLOAD flag if set */
- mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
- return 0;
+ /* update mbuf's metadata */
+ tun_process_step3(mb[i], sa->tx_offload.msk,
+ sa->tx_offload.val);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ return k;
+}
+
+/*
+ * *process* function for transport packets
+ */
+static inline uint16_t
+trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
+{
+ char *np;
+ uint32_t i, k, l2, tl;
+ uint32_t hl[num], to[num];
+ struct rte_esp_tail espt[num];
+ struct rte_mbuf *ml[num];
+
+ /*
+ * remove icv, esp trailer and high-order
+ * 32 bits of esn from packet length
+ */
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
+ const uint32_t cofs = sa->ctp.cipher.offset;
+
+ /*
+ * to minimize stalls due to load latency,
+ * read the mbufs' metadata and esp tail first.
+ */
+ for (i = 0; i != num; i++)
+ process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]);
+
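+ /*
+ * k - number of successfully processed packets;
+ * dr[] - indices of the packets that failed the checks
+ */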
+ k = 0;
+ for (i = 0; i != num; i++) {
+
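+ /* tl - bytes to cut from the tail, l2 - L2 header length */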
+ tl = tlen + espt[i].pad_len;
+ l2 = mb[i]->l2_len;
+
+ /* check that packet is valid */
+ if (trs_process_check(mb[i], &ml[i], &to[i], espt[i],
+ hl[i] + cofs, tl) == 0) {
+
+ /* modify packet's layout */
+ np = trs_process_step2(mb[i], ml[i], hl[i], cofs,
+ to[i], tl, sqn + k);
+ update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,
+ l2, hl[i] - l2, espt[i].next_proto);
+
+ /* update mbuf's metadata */
+ trs_process_step3(mb[i]);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ return k;
}
/*
uint32_t i, k;
struct replay_sqn *rsn;
+ /* replay not enabled */
+ if (sa->replay.win_sz == 0)
+ return num;
+
rsn = rsn_update_start(sa);
k = 0;
for (i = 0; i != num; i++) {
- if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
+ if (esn_inb_update_sqn(rsn, sa, rte_be_to_cpu_32(sqn[i])) == 0)
k++;
else
dr[i - k] = i;
}
/*
- * process group of ESP inbound tunnel packets.
+ * process group of ESP inbound packets.
*/
-uint16_t
-esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
- struct rte_mbuf *mb[], uint16_t num)
+static inline uint16_t
+esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
{
- uint32_t i, k, n;
- struct rte_ipsec_sa *sa;
+ uint32_t k, n;
uint32_t sqn[num];
uint32_t dr[num];
- sa = ss->sa;
-
/* process packets, extract seq numbers */
-
- k = 0;
- for (i = 0; i != num; i++) {
- /* good packet */
- if (inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
- k++;
- /* bad packet, will drop from furhter processing */
- else
- dr[i - k] = i;
- }
+ k = process(sa, mb, sqn, dr, num, sqh_len);
/* handle unprocessed mbufs */
if (k != num && k != 0)
}
/*
- * process group of ESP inbound transport packets.
+ * process group of ESP inbound tunnel packets.
*/
uint16_t
-esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
- uint32_t i, k, n;
- uint32_t sqn[num];
- struct rte_ipsec_sa *sa;
- uint32_t dr[num];
+ struct rte_ipsec_sa *sa = ss->sa;
- sa = ss->sa;
-
- /* process packets, extract seq numbers */
-
- k = 0;
- for (i = 0; i != num; i++) {
- /* good packet */
- if (inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
- k++;
- /* bad packet, will drop from furhter processing */
- else
- dr[i - k] = i;
- }
-
- /* handle unprocessed mbufs */
- if (k != num && k != 0)
- move_bad_mbufs(mb, dr, num, num - k);
+ return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
+}
- /* update SQN and replay winow */
- n = esp_inb_rsn_update(sa, sqn, dr, k);
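+
+/*
+ * process group of inline ESP inbound tunnel packets
+ * (sqh_len == 0, no ESN high-order bits were appended to the packet).
+ */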
+uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
+}
- /* handle mbufs with wrong SQN */
- if (n != k && n != 0)
- move_bad_mbufs(mb, dr, k, k - n);
+/*
+ * process group of ESP inbound transport packets.
+ */
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
- if (n != num)
- rte_errno = EBADMSG;
+ return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
+}
- return n;
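+
+/*
+ * process group of inline ESP inbound transport packets
+ * (sqh_len == 0, no ESN high-order bits were appended to the packet).
+ */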
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
}