LIBABIVER := 1
# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_inb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_outb.c
SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c
SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += ses.c
icv[i] = icv[i + 1];
}
+/*
+ * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
+ */
+static inline void
+lksd_none_cop_prepare(struct rte_crypto_op *cop,
+ struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
+{
+ struct rte_crypto_sym_op *sop;
+
+ sop = cop->sym;
+ cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ sop->m_src = mb;
+ __rte_crypto_sym_op_attach_sym_session(sop, cs);
+}
+
#endif /* _CRYPTO_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+/*
+ * setup crypto op and crypto sym op for ESP inbound tunnel packet.
+ */
+static inline int32_t
+inb_cop_prepare(struct rte_crypto_op *cop,
+ const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
+{
+ struct rte_crypto_sym_op *sop;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivc, *ivp;
+ uint32_t algo, clen;
+
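+ /*
+  * length of the encrypted data: it has to be non-negative and
+  * a multiple of the algorithm pad alignment, otherwise the
+  * packet is malformed.
+  */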
+ clen = plen - sa->ctp.cipher.length;
+ if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
+ return -EINVAL;
+
+ algo = sa->algo_type;
+
+ /* fill sym op fields */
+ sop = cop->sym;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->aead.data.length = clen;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct esp_hdr));
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->cipher.data.length = clen;
+ sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+ sop->auth.data.length = plen - sa->ctp.auth.length;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+
+ /* copy iv from the input packet to the cop */
+ ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct esp_hdr));
+ copy_iv(ivc, ivp, sa->iv_len);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->cipher.data.length = clen;
+ sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+ sop->auth.data.length = plen - sa->ctp.auth.length;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+
+ /* copy iv from the input packet to the cop */
+ ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+ sa->iv_ofs);
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct esp_hdr));
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+ sop->cipher.data.length = clen;
+ sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+ sop->auth.data.length = plen - sa->ctp.auth.length;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * for pure cryptodev (lookaside none), depending on SA settings,
+ * we might have to write some extra data to the packet.
+ */
+static inline void
+inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const union sym_op_data *icv)
+{
+ struct aead_gcm_aad *aad;
+
+ /* insert SQN.hi between ESP trailer and ICV */
+ if (sa->sqh_len != 0)
+ insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
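+ /* (SQN.hi takes part in the ICV computation, but is never
+  * transmitted as part of the packet itself)
+  */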
+
+ /*
+ * fill AAD fields, if any (aad fields are placed after icv),
+ * right now we support only one AEAD algorithm: AES-GCM.
+ */
+ if (sa->aad_len != 0) {
+ aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ }
+}
+
+/*
+ * setup/update packet data and metadata for ESP inbound tunnel case.
+ */
+static inline int32_t
+inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+ struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+{
+ int32_t rc;
+ uint64_t sqn;
+ uint32_t icv_ofs, plen;
+ struct rte_mbuf *ml;
+ struct esp_hdr *esph;
+
+ esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+
+ /*
+ * retrieve and reconstruct SQN, then check it, then
+ * convert it back into network byte order.
+ */
+ sqn = rte_be_to_cpu_32(esph->seq);
+ if (IS_ESN(sa))
+ sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+
+ rc = esn_inb_check_sqn(rsn, sa, sqn);
+ if (rc != 0)
+ return rc;
+
+ sqn = rte_cpu_to_be_64(sqn);
+
+ /* start packet manipulation */
+ plen = mb->pkt_len;
+ plen = plen - hlen;
+
+ ml = rte_pktmbuf_lastseg(mb);
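+ /*
+  * ICV offset inside the last segment: with ESN in use the ICV is
+  * shifted up by sqh_len bytes once SQN.hi is inserted before it.
+  */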
+ icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
+
+ /* we have to allocate space for AAD somewhere;
+ * right now just use the free trailing space of the last segment.
+ * It would probably be more convenient to reserve space for AAD
+ * inside rte_crypto_op itself
+ * (space for the IV is already reserved inside the cop).
+ */
+ if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
+
+ inb_pkt_xprepare(sa, sqn, icv);
+ return plen;
+}
+
+/*
+ * setup/update packets and crypto ops for ESP inbound case.
+ */
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, hl;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+ rsn = rsn_acquire(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+
+ hl = mb[i]->l2_len + mb[i]->l3_len;
+ rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+ if (rc >= 0) {
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ rc = inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
+ }
+
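+ /*
+  * k counts successfully prepared packets; failed packet indexes
+  * are stored densely in dr[], as i - k is the number of failures
+  * seen so far.
+  */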
+ k += (rc == 0);
+ if (rc != 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ rsn_release(sa, rsn);
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ return k;
+}
+
+/*
+ * process ESP inbound tunnel packet.
+ */
+static inline int
+inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t *sqn)
+{
+ uint32_t hlen, icv_len, tlen;
+ struct esp_hdr *esph;
+ struct esp_tail *espt;
+ struct rte_mbuf *ml;
+ char *pd;
+
+ if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+ return -EBADMSG;
+
+ icv_len = sa->icv_len;
+
+ ml = rte_pktmbuf_lastseg(mb);
+ espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
+ ml->data_len - icv_len - sizeof(*espt));
+
+ /*
+ * check padding and next proto.
+ * return an error if something is wrong.
+ */
+ pd = (char *)espt - espt->pad_len;
+ if (espt->next_proto != sa->proto ||
+ memcmp(pd, esp_pad_bytes, espt->pad_len))
+ return -EINVAL;
+
+ /* cut off ICV, ESP tail and padding bytes */
+ tlen = icv_len + sizeof(*espt) + espt->pad_len;
+ ml->data_len -= tlen;
+ mb->pkt_len -= tlen;
+
+ /* cut off L2/L3 headers, ESP header and IV */
+ hlen = mb->l2_len + mb->l3_len;
+ esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+ rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
+
+ /* retrieve SQN for later check */
+ *sqn = rte_be_to_cpu_32(esph->seq);
+
+ /* reset mbuf metadata: L2/L3 len, packet type */
+ mb->packet_type = RTE_PTYPE_UNKNOWN;
+ mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
+ sa->tx_offload.val;
+
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
+ return 0;
+}
+
+/*
+ * process ESP inbound transport packet.
+ */
+static inline int
+inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t *sqn)
+{
+ uint32_t hlen, icv_len, l2len, l3len, tlen;
+ struct esp_hdr *esph;
+ struct esp_tail *espt;
+ struct rte_mbuf *ml;
+ char *np, *op, *pd;
+
+ if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+ return -EBADMSG;
+
+ icv_len = sa->icv_len;
+
+ ml = rte_pktmbuf_lastseg(mb);
+ espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
+ ml->data_len - icv_len - sizeof(*espt));
+
+ /* check padding, return an error if something is wrong. */
+ pd = (char *)espt - espt->pad_len;
+ if (memcmp(pd, esp_pad_bytes, espt->pad_len))
+ return -EINVAL;
+
+ /* cut off ICV, ESP tail and padding bytes */
+ tlen = icv_len + sizeof(*espt) + espt->pad_len;
+ ml->data_len -= tlen;
+ mb->pkt_len -= tlen;
+
+ /* retrieve SQN for later check */
+ l2len = mb->l2_len;
+ l3len = mb->l3_len;
+ hlen = l2len + l3len;
+ op = rte_pktmbuf_mtod(mb, char *);
+ esph = (struct esp_hdr *)(op + hlen);
+ *sqn = rte_be_to_cpu_32(esph->seq);
+
+ /* cut off ESP header and IV, update L3 header */
+ np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
+ remove_esph(np, op, hlen);
+ update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
+ espt->next_proto);
+
+ /* reset mbuf packet type */
+ mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+
+ /* clear the PKT_RX_SEC_OFFLOAD flag if set */
+ mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
+ return 0;
+}
+
+/*
+ * for group of ESP inbound packets perform SQN check and update.
+ */
+static inline uint16_t
+esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
+ uint32_t dr[], uint16_t num)
+{
+ uint32_t i, k;
+ struct replay_sqn *rsn;
+
+ rsn = rsn_update_start(sa);
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
+ k++;
+ else
+ dr[i - k] = i;
+ }
+
+ rsn_update_finish(sa, rsn);
+ return k;
+}
+
+/*
+ * process group of ESP inbound tunnel packets.
+ */
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ uint32_t i, k, n;
+ struct rte_ipsec_sa *sa;
+ uint32_t sqn[num];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ /* process packets, extract seq numbers */
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ /* good packet */
+ if (inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
+ k++;
+ /* bad packet, will be dropped from further processing */
+ else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ /* update SQN and replay window */
+ n = esp_inb_rsn_update(sa, sqn, dr, k);
+
+ /* handle mbufs with wrong SQN */
+ if (n != k && n != 0)
+ move_bad_mbufs(mb, dr, k, k - n);
+
+ if (n != num)
+ rte_errno = EBADMSG;
+
+ return n;
+}
+
+/*
+ * process group of ESP inbound transport packets.
+ */
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ uint32_t i, k, n;
+ uint32_t sqn[num];
+ struct rte_ipsec_sa *sa;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ /* process packets, extract seq numbers */
+
+ k = 0;
+ for (i = 0; i != num; i++) {
+ /* good packet */
+ if (inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
+ k++;
+ /* bad packet, will be dropped from further processing */
+ else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ /* update SQN and replay window */
+ n = esp_inb_rsn_update(sa, sqn, dr, k);
+
+ /* handle mbufs with wrong SQN */
+ if (n != k && n != 0)
+ move_bad_mbufs(mb, dr, k, k - n);
+
+ if (n != num)
+ rte_errno = EBADMSG;
+
+ return n;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+/*
+ * setup crypto op and crypto sym op for ESP outbound packet.
+ */
+static inline void
+outb_cop_prepare(struct rte_crypto_op *cop,
+ const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
+ const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
+{
+ struct rte_crypto_sym_op *sop;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+
+ /* fill sym op fields */
+ sop = cop->sym;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_CBC:
+ /* Cipher-Auth (AES-CBC *) case */
+ case ALGO_TYPE_3DES_CBC:
+ /* Cipher-Auth (3DES-CBC *) case */
+ case ALGO_TYPE_NULL:
+ /* NULL case */
+ sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
+ sop->cipher.data.length = sa->ctp.cipher.length + plen;
+ sop->auth.data.offset = sa->ctp.auth.offset + hlen;
+ sop->auth.data.length = sa->ctp.auth.length + plen;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+ break;
+ case ALGO_TYPE_AES_GCM:
+ /* AEAD (AES_GCM) case */
+ sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
+ sop->aead.data.length = sa->ctp.cipher.length + plen;
+ sop->aead.digest.data = icv->va;
+ sop->aead.digest.phys_addr = icv->pa;
+ sop->aead.aad.data = icv->va + sa->icv_len;
+ sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ /* Cipher-Auth (AES-CTR *) case */
+ sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
+ sop->cipher.data.length = sa->ctp.cipher.length + plen;
+ sop->auth.data.offset = sa->ctp.auth.offset + hlen;
+ sop->auth.data.length = sa->ctp.auth.length + plen;
+ sop->auth.digest.data = icv->va;
+ sop->auth.digest.phys_addr = icv->pa;
+
+ ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+ sa->iv_ofs);
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+}
+
+/*
+ * setup/update packet data and metadata for ESP outbound tunnel case.
+ */
+static inline int32_t
+outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ union sym_op_data *icv)
+{
+ uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
+ struct rte_mbuf *ml;
+ struct esp_hdr *esph;
+ struct esp_tail *espt;
+ char *ph, *pt;
+ uint64_t *iv;
+
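+ /*
+  * resulting tunnel packet layout (the ICV itself is filled in
+  * by the crypto device):
+  * tun hdr | ESP hdr | IV | payload | padding | ESP tail | ICV
+  * with optional SQN.hi placed right before the ICV and AAD
+  * scratch space right after it.
+  */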
+ /* calculate extra header space required */
+ hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
+
+ /* size of ipsec protected data */
+ l2len = mb->l2_len;
+ plen = mb->pkt_len - l2len;
+
+ /* number of bytes to encrypt */
+ clen = plen + sizeof(*espt);
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
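+ /* e.g. plen = 100, pad_align = 4: clen = 104 and pdlen = 4,
+  * i.e. 2 bytes of padding followed by the 2-byte ESP tail.
+  */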
+
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len;
+
+ /* do append and prepend */
+ ml = rte_pktmbuf_lastseg(mb);
+ if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ /* prepend header */
+ ph = rte_pktmbuf_prepend(mb, hlen - l2len);
+ if (ph == NULL)
+ return -ENOSPC;
+
+ /* append tail */
+ pdofs = ml->data_len;
+ ml->data_len += tlen;
+ mb->pkt_len += tlen;
+ pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
+
+ /* update pkt l2/l3 len */
+ mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
+ sa->tx_offload.val;
+
+ /* copy tunnel pkt header */
+ rte_memcpy(ph, sa->hdr, sa->hdr_len);
+
+ /* update original and new ip header fields */
+ update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
+ sqn_low16(sqc));
+
+ /* update spi, seqn and iv */
+ esph = (struct esp_hdr *)(ph + sa->hdr_len);
+ iv = (uint64_t *)(esph + 1);
+ copy_iv(iv, ivp, sa->iv_len);
+
+ esph->spi = sa->spi;
+ esph->seq = sqn_low32(sqc);
+
+ /* offset for ICV */
+ pdofs += pdlen + sa->sqh_len;
+
+ /* pad length */
+ pdlen -= sizeof(*espt);
+
+ /* copy padding data */
+ rte_memcpy(pt, esp_pad_bytes, pdlen);
+
+ /* update esp trailer */
+ espt = (struct esp_tail *)(pt + pdlen);
+ espt->pad_len = pdlen;
+ espt->next_proto = sa->proto;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+
+ return clen;
+}
+
+/*
+ * for pure cryptodev (lookaside none), depending on SA settings,
+ * we might have to write some extra data to the packet.
+ */
+static inline void
+outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const union sym_op_data *icv)
+{
+ uint32_t *psqh;
+ struct aead_gcm_aad *aad;
+
+ /* insert SQN.hi between ESP trailer and ICV */
+ if (sa->sqh_len != 0) {
+ psqh = (uint32_t *)(icv->va - sa->sqh_len);
+ psqh[0] = sqn_hi32(sqc);
+ }
+
+ /*
+ * fill AAD fields, if any (aad fields are placed after icv),
+ * right now we support only one AEAD algorithm: AES-GCM.
+ */
+ if (sa->aad_len != 0) {
+ aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ }
+}
+
+/*
+ * setup/update packets and crypto ops for ESP outbound tunnel case.
+ */
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ return k;
+}
+
+/*
+ * setup/update packet data and metadata for ESP outbound transport case.
+ */
+static inline int32_t
+outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
+{
+ uint8_t np;
+ uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
+ struct rte_mbuf *ml;
+ struct esp_hdr *esph;
+ struct esp_tail *espt;
+ char *ph, *pt;
+ uint64_t *iv;
+
+ uhlen = l2len + l3len;
+ plen = mb->pkt_len - uhlen;
+
+ /* calculate extra header space required */
+ hlen = sa->iv_len + sizeof(*esph);
+
+ /* number of bytes to encrypt */
+ clen = plen + sizeof(*espt);
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len;
+
+ /* do append and insert */
+ ml = rte_pktmbuf_lastseg(mb);
+ if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ return -ENOSPC;
+
+ /* prepend space for ESP header */
+ ph = rte_pktmbuf_prepend(mb, hlen);
+ if (ph == NULL)
+ return -ENOSPC;
+
+ /* append tail */
+ pdofs = ml->data_len;
+ ml->data_len += tlen;
+ mb->pkt_len += tlen;
+ pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
+
+ /* shift L2/L3 headers */
+ insert_esph(ph, ph + hlen, uhlen);
+
+ /* update ip header fields */
+ np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
+ IPPROTO_ESP);
+
+ /* update spi, seqn and iv */
+ esph = (struct esp_hdr *)(ph + uhlen);
+ iv = (uint64_t *)(esph + 1);
+ copy_iv(iv, ivp, sa->iv_len);
+
+ esph->spi = sa->spi;
+ esph->seq = sqn_low32(sqc);
+
+ /* offset for ICV */
+ pdofs += pdlen + sa->sqh_len;
+
+ /* pad length */
+ pdlen -= sizeof(*espt);
+
+ /* copy padding data */
+ rte_memcpy(pt, esp_pad_bytes, pdlen);
+
+ /* update esp trailer */
+ espt = (struct esp_tail *)(pt + pdlen);
+ espt->pad_len = pdlen;
+ espt->next_proto = np;
+
+ icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
+ icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+
+ return clen;
+}
+
+/*
+ * setup/update packets and crypto ops for ESP outbound transport case.
+ */
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n, l2, l3;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_cryptodev_sym_session *cs;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+ cs = ss->crypto.ses;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+ lksd_none_cop_prepare(cop[k], cs, mb[i]);
+ outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ return k;
+}
+
+/*
+ * process outbound packets for SA with ESN support,
+ * for algorithms that require SQN.hibits to be implicitly included
+ * into digest computation.
+ * In that case we have to move ICV bytes back to their proper place.
+ */
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num)
+{
+ uint32_t i, k, icv_len, *icv;
+ struct rte_mbuf *ml;
+ struct rte_ipsec_sa *sa;
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ k = 0;
+ icv_len = sa->icv_len;
+
+ for (i = 0; i != num; i++) {
+ if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
+ ml = rte_pktmbuf_lastseg(mb[i]);
+ icv = rte_pktmbuf_mtod_offset(ml, void *,
+ ml->data_len - icv_len);
+ remove_sqh(icv, icv_len);
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ /* handle unprocessed mbufs */
+ if (k != num) {
+ rte_errno = EBADMSG;
+ if (k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ return k;
+}
+
+/*
+ * prepare packets for inline ipsec processing:
+ * set ol_flags and attach metadata.
+ */
+static inline void
+inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ uint32_t i, ol_flags;
+
+ ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
+ for (i = 0; i != num; i++) {
+
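+ /* mark the packet for inline ipsec offload; attach session
+  * metadata only when the security context requires it.
+  */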
+ mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ if (ol_flags != 0)
+ rte_security_set_pkt_metadata(ss->security.ctx,
+ ss->security.ses, mb[i], NULL);
+ }
+}
+
+/*
+ * process group of ESP outbound tunnel packets destined for
+ * INLINE_CRYPTO type of device.
+ */
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
+
+ k += (rc >= 0);
+
+ /* failure, put packet into the death-row */
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not processed mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ inline_outb_mbuf_prepare(ss, mb, k);
+ return k;
+}
+
+/*
+ * process group of ESP outbound transport packets destined for
+ * INLINE_CRYPTO type of device.
+ */
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k, n, l2, l3;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ union sym_op_data icv;
+ uint64_t iv[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ k = 0;
+ for (i = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(iv, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
+ l2, l3, &icv);
+
+ k += (rc >= 0);
+
+ /* failure, put packet into the death-row */
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not processed mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ inline_outb_mbuf_prepare(ss, mb, k);
+ return k;
+}
+
+/*
+ * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ * actual processing is done by HW/PMD, just set flags and metadata.
+ */
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ inline_outb_mbuf_prepare(ss, mb, num);
+ return num;
+}
#endif
}
-/*
- * for given size, calculate required number of buckets.
- */
-static uint32_t
-replay_num_bucket(uint32_t wsz)
-{
- uint32_t nb;
-
- nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
- WINDOW_BUCKET_SIZE);
- nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
-
- return nb;
-}
-
/*
* According to RFC4303 A2.1, determine the high-order bit of sequence number.
* use 32bit arithmetic inside, return uint64_t.
* between writer and readers.
*/
-/**
- * Based on number of buckets calculated required size for the
- * structure that holds replay window and sequence number (RSN) information.
- */
-static size_t
-rsn_size(uint32_t nb_bucket)
-{
- size_t sz;
- struct replay_sqn *rsn;
-
- sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
- sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
- return sz;
-}
-
/**
* Copy replay window and SQN.
*/
allow_experimental_apis = true
-sources=files('sa.c', 'ses.c')
+sources=files('esp_inb.c', 'esp_outb.c', 'sa.c', 'ses.c')
install_headers = files('rte_ipsec.h', 'rte_ipsec_group.h', 'rte_ipsec_sa.h')
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _MISC_H_
+#define _MISC_H_
+
+/**
+ * @file misc.h
+ * Contains miscellaneous functions/structures/macros used internally
+ * by the ipsec library.
+ */
+
+/*
+ * Move bad (unprocessed) mbufs beyond the good (processed) ones.
+ * bad_idx[] contains the indexes of bad mbufs inside the mb[].
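+ * The relative order within both groups is preserved (stable
+ * partition), e.g. mb = {A, B, C, D} with bad_idx = {1, 3}
+ * results in mb = {A, C, B, D}.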
+ */
+static inline void
+move_bad_mbufs(struct rte_mbuf *mb[], const uint32_t bad_idx[], uint32_t nb_mb,
+ uint32_t nb_bad)
+{
+ uint32_t i, j, k;
+ struct rte_mbuf *drb[nb_bad];
+
+ j = 0;
+ k = 0;
+
+ /* copy bad ones into a temp place */
+ for (i = 0; i != nb_mb; i++) {
+ if (j != nb_bad && i == bad_idx[j])
+ drb[j++] = mb[i];
+ else
+ mb[k++] = mb[i];
+ }
+
+ /* copy bad ones after the good ones */
+ for (i = 0; i != nb_bad; i++)
+ mb[k + i] = drb[i];
+}
+
+#endif /* _MISC_H_ */
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
+#include "misc.h"
#include "pad.h"
#define MBUF_MAX_L2_LEN RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
return sa->type;
}
+/**
+ * Based on the number of buckets, calculate the required size of the
+ * structure that holds replay window and sequence number (RSN) information.
+ */
+static size_t
+rsn_size(uint32_t nb_bucket)
+{
+ size_t sz;
+ struct replay_sqn *rsn;
+
+ sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
+ sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
+ return sz;
+}
+
+/*
+ * for a given window size, calculate the required number of buckets.
+ */
+static uint32_t
+replay_num_bucket(uint32_t wsz)
+{
+ uint32_t nb;
+
+ nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
+ WINDOW_BUCKET_SIZE);
+ nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
+
+ return nb;
+}
+
static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
return sz;
}
-/*
- * Move bad (unprocessed) mbufs beyond the good (processed) ones.
- * bad_idx[] contains the indexes of bad mbufs inside the mb[].
- */
-static inline void
-move_bad_mbufs(struct rte_mbuf *mb[], const uint32_t bad_idx[], uint32_t nb_mb,
- uint32_t nb_bad)
-{
- uint32_t i, j, k;
- struct rte_mbuf *drb[nb_bad];
-
- j = 0;
- k = 0;
-
- /* copy bad ones into a temp place */
- for (i = 0; i != nb_mb; i++) {
- if (j != nb_bad && i == bad_idx[j])
- drb[j++] = mb[i];
- else
- mb[k++] = mb[i];
- }
-
- /* copy bad ones after the good ones */
- for (i = 0; i != nb_bad; i++)
- mb[k + i] = drb[i];
-}
-
-/*
- * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.
- */
-static inline void
-lksd_none_cop_prepare(struct rte_crypto_op *cop,
- struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
-{
- struct rte_crypto_sym_op *sop;
-
- sop = cop->sym;
- cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
- cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
- cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
- sop->m_src = mb;
- __rte_crypto_sym_op_attach_sym_session(sop, cs);
-}
-
-/*
- * setup crypto op and crypto sym op for ESP outbound packet.
- */
-static inline void
-esp_outb_cop_prepare(struct rte_crypto_op *cop,
- const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
- const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
-{
- struct rte_crypto_sym_op *sop;
- struct aead_gcm_iv *gcm;
- struct aesctr_cnt_blk *ctr;
- uint8_t algo_type = sa->algo_type;
-
- /* fill sym op fields */
- sop = cop->sym;
-
- switch (algo_type) {
- case ALGO_TYPE_AES_CBC:
- /* Cipher-Auth (AES-CBC *) case */
- case ALGO_TYPE_3DES_CBC:
- /* Cipher-Auth (3DES-CBC *) case */
- case ALGO_TYPE_NULL:
- /* NULL case */
- sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
- sop->cipher.data.length = sa->ctp.cipher.length + plen;
- sop->auth.data.offset = sa->ctp.auth.offset + hlen;
- sop->auth.data.length = sa->ctp.auth.length + plen;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
- break;
- case ALGO_TYPE_AES_GCM:
- /* AEAD (AES_GCM) case */
- sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
- sop->aead.data.length = sa->ctp.cipher.length + plen;
- sop->aead.digest.data = icv->va;
- sop->aead.digest.phys_addr = icv->pa;
- sop->aead.aad.data = icv->va + sa->icv_len;
- sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
-
- /* fill AAD IV (located inside crypto op) */
- gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
- sa->iv_ofs);
- aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
- break;
- case ALGO_TYPE_AES_CTR:
- /* Cipher-Auth (AES-CTR *) case */
- sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
- sop->cipher.data.length = sa->ctp.cipher.length + plen;
- sop->auth.data.offset = sa->ctp.auth.offset + hlen;
- sop->auth.data.length = sa->ctp.auth.length + plen;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
-
- ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
- sa->iv_ofs);
- aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
- break;
- default:
- break;
- }
-}
-
-/*
- * setup/update packet data and metadata for ESP outbound tunnel case.
- */
-static inline int32_t
-esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
- const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv)
-{
- uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
- struct rte_mbuf *ml;
- struct esp_hdr *esph;
- struct esp_tail *espt;
- char *ph, *pt;
- uint64_t *iv;
-
- /* calculate extra header space required */
- hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
-
- /* size of ipsec protected data */
- l2len = mb->l2_len;
- plen = mb->pkt_len - l2len;
-
- /* number of bytes to encrypt */
- clen = plen + sizeof(*espt);
- clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
-
- /* pad length + esp tail */
- pdlen = clen - plen;
- tlen = pdlen + sa->icv_len;
-
- /* do append and prepend */
- ml = rte_pktmbuf_lastseg(mb);
- if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
- return -ENOSPC;
-
- /* prepend header */
- ph = rte_pktmbuf_prepend(mb, hlen - l2len);
- if (ph == NULL)
- return -ENOSPC;
-
- /* append tail */
- pdofs = ml->data_len;
- ml->data_len += tlen;
- mb->pkt_len += tlen;
- pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
-
- /* update pkt l2/l3 len */
- mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
- sa->tx_offload.val;
-
- /* copy tunnel pkt header */
- rte_memcpy(ph, sa->hdr, sa->hdr_len);
-
- /* update original and new ip header fields */
- update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
- sqn_low16(sqc));
-
- /* update spi, seqn and iv */
- esph = (struct esp_hdr *)(ph + sa->hdr_len);
- iv = (uint64_t *)(esph + 1);
- copy_iv(iv, ivp, sa->iv_len);
-
- esph->spi = sa->spi;
- esph->seq = sqn_low32(sqc);
-
- /* offset for ICV */
- pdofs += pdlen + sa->sqh_len;
-
- /* pad length */
- pdlen -= sizeof(*espt);
-
- /* copy padding data */
- rte_memcpy(pt, esp_pad_bytes, pdlen);
-
- /* update esp trailer */
- espt = (struct esp_tail *)(pt + pdlen);
- espt->pad_len = pdlen;
- espt->next_proto = sa->proto;
-
- icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
- icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
-
- return clen;
-}
-
-/*
- * for pure cryptodev (lookaside none) depending on SA settings,
- * we might have to write some extra data to the packet.
- */
-static inline void
-outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
- const union sym_op_data *icv)
-{
- uint32_t *psqh;
- struct aead_gcm_aad *aad;
- uint8_t algo_type = sa->algo_type;
-
- /* insert SQN.hi between ESP trailer and ICV */
- if (sa->sqh_len != 0) {
- psqh = (uint32_t *)(icv->va - sa->sqh_len);
- psqh[0] = sqn_hi32(sqc);
- }
-
- /*
- * fill IV and AAD fields, if any (aad fields are placed after icv),
- * right now we support only one AEAD algorithm: AES-GCM .
- */
- if (algo_type == ALGO_TYPE_AES_GCM) {
- aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
- aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
- }
-}
-
-/*
- * setup/update packets and crypto ops for ESP outbound tunnel case.
- */
-static uint16_t
-outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- struct rte_crypto_op *cop[], uint16_t num)
-{
- int32_t rc;
- uint32_t i, k, n;
- uint64_t sqn;
- rte_be64_t sqc;
- struct rte_ipsec_sa *sa;
- struct rte_cryptodev_sym_session *cs;
- union sym_op_data icv;
- uint64_t iv[IPSEC_MAX_IV_QWORD];
- uint32_t dr[num];
-
- sa = ss->sa;
- cs = ss->crypto.ses;
-
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
- rte_errno = EOVERFLOW;
-
- k = 0;
- for (i = 0; i != n; i++) {
-
- sqc = rte_cpu_to_be_64(sqn + i);
- gen_iv(iv, sqc);
-
- /* try to update the packet itself */
- rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
-
- /* success, setup crypto op */
- if (rc >= 0) {
- outb_pkt_xprepare(sa, sqc, &icv);
- lksd_none_cop_prepare(cop[k], cs, mb[i]);
- esp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
- k++;
- /* failure, put packet into the death-row */
- } else {
- dr[i - k] = i;
- rte_errno = -rc;
- }
- }
-
- /* copy not prepared mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
-
- return k;
-}
-
-/*
- * setup/update packet data and metadata for ESP outbound transport case.
- */
-static inline int32_t
-esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
- const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- uint32_t l2len, uint32_t l3len, union sym_op_data *icv)
-{
- uint8_t np;
- uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
- struct rte_mbuf *ml;
- struct esp_hdr *esph;
- struct esp_tail *espt;
- char *ph, *pt;
- uint64_t *iv;
-
- uhlen = l2len + l3len;
- plen = mb->pkt_len - uhlen;
-
- /* calculate extra header space required */
- hlen = sa->iv_len + sizeof(*esph);
-
- /* number of bytes to encrypt */
- clen = plen + sizeof(*espt);
- clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
-
- /* pad length + esp tail */
- pdlen = clen - plen;
- tlen = pdlen + sa->icv_len;
-
- /* do append and insert */
- ml = rte_pktmbuf_lastseg(mb);
- if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
- return -ENOSPC;
-
- /* prepend space for ESP header */
- ph = rte_pktmbuf_prepend(mb, hlen);
- if (ph == NULL)
- return -ENOSPC;
-
- /* append tail */
- pdofs = ml->data_len;
- ml->data_len += tlen;
- mb->pkt_len += tlen;
- pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
-
- /* shift L2/L3 headers */
- insert_esph(ph, ph + hlen, uhlen);
-
- /* update ip header fields */
- np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
- IPPROTO_ESP);
-
- /* update spi, seqn and iv */
- esph = (struct esp_hdr *)(ph + uhlen);
- iv = (uint64_t *)(esph + 1);
- copy_iv(iv, ivp, sa->iv_len);
-
- esph->spi = sa->spi;
- esph->seq = sqn_low32(sqc);
-
- /* offset for ICV */
- pdofs += pdlen + sa->sqh_len;
-
- /* pad length */
- pdlen -= sizeof(*espt);
-
- /* copy padding data */
- rte_memcpy(pt, esp_pad_bytes, pdlen);
-
- /* update esp trailer */
- espt = (struct esp_tail *)(pt + pdlen);
- espt->pad_len = pdlen;
- espt->next_proto = np;
-
- icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
- icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
-
- return clen;
-}
-
-/*
- * setup/update packets and crypto ops for ESP outbound transport case.
- */
-static uint16_t
-outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- struct rte_crypto_op *cop[], uint16_t num)
-{
- int32_t rc;
- uint32_t i, k, n, l2, l3;
- uint64_t sqn;
- rte_be64_t sqc;
- struct rte_ipsec_sa *sa;
- struct rte_cryptodev_sym_session *cs;
- union sym_op_data icv;
- uint64_t iv[IPSEC_MAX_IV_QWORD];
- uint32_t dr[num];
-
- sa = ss->sa;
- cs = ss->crypto.ses;
-
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
- rte_errno = EOVERFLOW;
-
- k = 0;
- for (i = 0; i != n; i++) {
-
- l2 = mb[i]->l2_len;
- l3 = mb[i]->l3_len;
-
- sqc = rte_cpu_to_be_64(sqn + i);
- gen_iv(iv, sqc);
-
- /* try to update the packet itself */
- rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
- l2, l3, &icv);
-
- /* success, setup crypto op */
- if (rc >= 0) {
- outb_pkt_xprepare(sa, sqc, &icv);
- lksd_none_cop_prepare(cop[k], cs, mb[i]);
- esp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
- k++;
- /* failure, put packet into the death-row */
- } else {
- dr[i - k] = i;
- rte_errno = -rc;
- }
- }
-
- /* copy not prepared mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
-
- return k;
-}
-
-/*
- * setup crypto op and crypto sym op for ESP inbound tunnel packet.
- */
-static inline int32_t
-esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
- const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
- const union sym_op_data *icv, uint32_t pofs, uint32_t plen)
-{
- struct rte_crypto_sym_op *sop;
- struct aead_gcm_iv *gcm;
- struct aesctr_cnt_blk *ctr;
- uint64_t *ivc, *ivp;
- uint32_t clen;
- uint8_t algo_type = sa->algo_type;
-
- clen = plen - sa->ctp.cipher.length;
- if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
- return -EINVAL;
-
- /* fill sym op fields */
- sop = cop->sym;
-
- switch (algo_type) {
- case ALGO_TYPE_AES_GCM:
- sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
- sop->aead.data.length = clen;
- sop->aead.digest.data = icv->va;
- sop->aead.digest.phys_addr = icv->pa;
- sop->aead.aad.data = icv->va + sa->icv_len;
- sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
-
- /* fill AAD IV (located inside crypto op) */
- gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
- sa->iv_ofs);
- ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
- aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
- break;
- case ALGO_TYPE_AES_CBC:
- case ALGO_TYPE_3DES_CBC:
- sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
- sop->cipher.data.length = clen;
- sop->auth.data.offset = pofs + sa->ctp.auth.offset;
- sop->auth.data.length = plen - sa->ctp.auth.length;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
-
- /* copy iv from the input packet to the cop */
- ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
- ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
- copy_iv(ivc, ivp, sa->iv_len);
- break;
- case ALGO_TYPE_AES_CTR:
- sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
- sop->cipher.data.length = clen;
- sop->auth.data.offset = pofs + sa->ctp.auth.offset;
- sop->auth.data.length = plen - sa->ctp.auth.length;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
-
- /* copy iv from the input packet to the cop */
- ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
- sa->iv_ofs);
- ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
- aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
- break;
- case ALGO_TYPE_NULL:
- sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
- sop->cipher.data.length = clen;
- sop->auth.data.offset = pofs + sa->ctp.auth.offset;
- sop->auth.data.length = plen - sa->ctp.auth.length;
- sop->auth.digest.data = icv->va;
- sop->auth.digest.phys_addr = icv->pa;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * for pure cryptodev (lookaside none) depending on SA settings,
- * we might have to write some extra data to the packet.
- */
-static inline void
-inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
- const union sym_op_data *icv)
-{
- struct aead_gcm_aad *aad;
-
- /* insert SQN.hi between ESP trailer and ICV */
- if (sa->sqh_len != 0)
- insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
-
- /*
- * fill AAD fields, if any (aad fields are placed after icv),
- * right now we support only one AEAD algorithm: AES-GCM.
- */
- if (sa->aad_len != 0) {
- aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
- aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
- }
-}
-
-/*
- * setup/update packet data and metadata for ESP inbound tunnel case.
- */
-static inline int32_t
-esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,
- const struct replay_sqn *rsn, struct rte_mbuf *mb,
- uint32_t hlen, union sym_op_data *icv)
-{
- int32_t rc;
- uint64_t sqn;
- uint32_t icv_ofs, plen;
- struct rte_mbuf *ml;
- struct esp_hdr *esph;
-
- esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
-
- /*
- * retrieve and reconstruct SQN, then check it, then
- * convert it back into network byte order.
- */
- sqn = rte_be_to_cpu_32(esph->seq);
- if (IS_ESN(sa))
- sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
-
- rc = esn_inb_check_sqn(rsn, sa, sqn);
- if (rc != 0)
- return rc;
-
- sqn = rte_cpu_to_be_64(sqn);
-
- /* start packet manipulation */
- plen = mb->pkt_len;
- plen = plen - hlen;
-
- ml = rte_pktmbuf_lastseg(mb);
- icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;
-
- /* we have to allocate space for AAD somewhere,
- * right now - just use free trailing space at the last segment.
- * Would probably be more convenient to reserve space for AAD
- * inside rte_crypto_op itself
- * (again for IV space is already reserved inside cop).
- */
- if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
- return -ENOSPC;
-
- icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
- icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
-
- inb_pkt_xprepare(sa, sqn, icv);
- return plen;
-}
-
-/*
- * setup/update packets and crypto ops for ESP inbound case.
- */
-static uint16_t
-inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- struct rte_crypto_op *cop[], uint16_t num)
-{
- int32_t rc;
- uint32_t i, k, hl;
- struct rte_ipsec_sa *sa;
- struct rte_cryptodev_sym_session *cs;
- struct replay_sqn *rsn;
- union sym_op_data icv;
- uint32_t dr[num];
-
- sa = ss->sa;
- cs = ss->crypto.ses;
- rsn = rsn_acquire(sa);
-
- k = 0;
- for (i = 0; i != num; i++) {
-
- hl = mb[i]->l2_len + mb[i]->l3_len;
- rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);
- if (rc >= 0) {
- lksd_none_cop_prepare(cop[k], cs, mb[i]);
- rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,
- hl, rc);
- }
-
- k += (rc == 0);
- if (rc != 0) {
- dr[i - k] = i;
- rte_errno = -rc;
- }
- }
-
- rsn_release(sa, rsn);
-
- /* copy not prepared mbufs beyond good ones */
- if (k != num && k != 0)
- move_bad_mbufs(mb, dr, num, num - k);
-
- return k;
-}
-
/*
* setup crypto ops for LOOKASIDE_PROTO type of devices.
*/
return num;
}
-/*
- * process ESP inbound tunnel packet.
- */
-static inline int
-esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
- uint32_t *sqn)
-{
- uint32_t hlen, icv_len, tlen;
- struct esp_hdr *esph;
- struct esp_tail *espt;
- struct rte_mbuf *ml;
- char *pd;
-
- if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
- return -EBADMSG;
-
- icv_len = sa->icv_len;
-
- ml = rte_pktmbuf_lastseg(mb);
- espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
- ml->data_len - icv_len - sizeof(*espt));
-
- /*
- * check padding and next proto.
- * return an error if something is wrong.
- */
- pd = (char *)espt - espt->pad_len;
- if (espt->next_proto != sa->proto ||
- memcmp(pd, esp_pad_bytes, espt->pad_len))
- return -EINVAL;
-
- /* cut of ICV, ESP tail and padding bytes */
- tlen = icv_len + sizeof(*espt) + espt->pad_len;
- ml->data_len -= tlen;
- mb->pkt_len -= tlen;
-
- /* cut of L2/L3 headers, ESP header and IV */
- hlen = mb->l2_len + mb->l3_len;
- esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
- rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
-
- /* retrieve SQN for later check */
- *sqn = rte_be_to_cpu_32(esph->seq);
-
- /* reset mbuf metatdata: L2/L3 len, packet type */
- mb->packet_type = RTE_PTYPE_UNKNOWN;
- mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
- sa->tx_offload.val;
-
- /* clear the PKT_RX_SEC_OFFLOAD flag if set */
- mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
- return 0;
-}
-
-/*
- * process ESP inbound transport packet.
- */
-static inline int
-esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
- uint32_t *sqn)
-{
- uint32_t hlen, icv_len, l2len, l3len, tlen;
- struct esp_hdr *esph;
- struct esp_tail *espt;
- struct rte_mbuf *ml;
- char *np, *op, *pd;
-
- if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
- return -EBADMSG;
-
- icv_len = sa->icv_len;
-
- ml = rte_pktmbuf_lastseg(mb);
- espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,
- ml->data_len - icv_len - sizeof(*espt));
-
- /* check padding, return an error if something is wrong. */
- pd = (char *)espt - espt->pad_len;
- if (memcmp(pd, esp_pad_bytes, espt->pad_len))
- return -EINVAL;
-
- /* cut of ICV, ESP tail and padding bytes */
- tlen = icv_len + sizeof(*espt) + espt->pad_len;
- ml->data_len -= tlen;
- mb->pkt_len -= tlen;
-
- /* retrieve SQN for later check */
- l2len = mb->l2_len;
- l3len = mb->l3_len;
- hlen = l2len + l3len;
- op = rte_pktmbuf_mtod(mb, char *);
- esph = (struct esp_hdr *)(op + hlen);
- *sqn = rte_be_to_cpu_32(esph->seq);
-
- /* cut off ESP header and IV, update L3 header */
- np = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);
- remove_esph(np, op, hlen);
- update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
- espt->next_proto);
-
- /* reset mbuf packet type */
- mb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
-
- /* clear the PKT_RX_SEC_OFFLOAD flag if set */
- mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);
- return 0;
-}
-
-/*
- * for group of ESP inbound packets perform SQN check and update.
- */
-static inline uint16_t
-esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
- uint32_t dr[], uint16_t num)
-{
- uint32_t i, k;
- struct replay_sqn *rsn;
-
- rsn = rsn_update_start(sa);
-
- k = 0;
- for (i = 0; i != num; i++) {
- if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)
- k++;
- else
- dr[i - k] = i;
- }
-
- rsn_update_finish(sa, rsn);
- return k;
-}
-
-/*
- * process group of ESP inbound tunnel packets.
- */
-static uint16_t
-inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
-{
- uint32_t i, k, n;
- struct rte_ipsec_sa *sa;
- uint32_t sqn[num];
- uint32_t dr[num];
-
- sa = ss->sa;
-
- /* process packets, extract seq numbers */
-
- k = 0;
- for (i = 0; i != num; i++) {
- /* good packet */
- if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)
- k++;
- /* bad packet, will drop from furhter processing */
- else
- dr[i - k] = i;
- }
-
- /* handle unprocessed mbufs */
- if (k != num && k != 0)
- move_bad_mbufs(mb, dr, num, num - k);
-
- /* update SQN and replay winow */
- n = esp_inb_rsn_update(sa, sqn, dr, k);
-
- /* handle mbufs with wrong SQN */
- if (n != k && n != 0)
- move_bad_mbufs(mb, dr, k, k - n);
-
- if (n != num)
- rte_errno = EBADMSG;
-
- return n;
-}
-
-/*
- * process group of ESP inbound transport packets.
- */
-static uint16_t
-inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
-{
- uint32_t i, k, n;
- uint32_t sqn[num];
- struct rte_ipsec_sa *sa;
- uint32_t dr[num];
-
- sa = ss->sa;
-
- /* process packets, extract seq numbers */
-
- k = 0;
- for (i = 0; i != num; i++) {
- /* good packet */
- if (esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)
- k++;
- /* bad packet, will drop from furhter processing */
- else
- dr[i - k] = i;
- }
-
- /* handle unprocessed mbufs */
- if (k != num && k != 0)
- move_bad_mbufs(mb, dr, num, num - k);
-
- /* update SQN and replay winow */
- n = esp_inb_rsn_update(sa, sqn, dr, k);
-
- /* handle mbufs with wrong SQN */
- if (n != k && n != 0)
- move_bad_mbufs(mb, dr, k, k - n);
-
- if (n != num)
- rte_errno = EBADMSG;
-
- return n;
-}
-
-/*
- * process outbound packets for SA with ESN support,
- * for algorithms that require SQN.hibits to be implictly included
- * into digest computation.
- * In that case we have to move ICV bytes back to their proper place.
- */
-static uint16_t
-outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
-{
- uint32_t i, k, icv_len, *icv;
- struct rte_mbuf *ml;
- struct rte_ipsec_sa *sa;
- uint32_t dr[num];
-
- sa = ss->sa;
-
- k = 0;
- icv_len = sa->icv_len;
-
- for (i = 0; i != num; i++) {
- if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
- ml = rte_pktmbuf_lastseg(mb[i]);
- icv = rte_pktmbuf_mtod_offset(ml, void *,
- ml->data_len - icv_len);
- remove_sqh(icv, icv_len);
- k++;
- } else
- dr[i - k] = i;
- }
-
- /* handle unprocessed mbufs */
- if (k != num) {
- rte_errno = EBADMSG;
- if (k != 0)
- move_bad_mbufs(mb, dr, num, num - k);
- }
-
- return k;
-}
-
/*
* simplest pkt process routine:
* all actual processing is already done by HW/PMD,
return k;
}
-/*
- * prepare packets for inline ipsec processing:
- * set ol_flags and attach metadata.
- */
-static inline void
-inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
- struct rte_mbuf *mb[], uint16_t num)
-{
- uint32_t i, ol_flags;
-
- ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
- for (i = 0; i != num; i++) {
-
- mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
- if (ol_flags != 0)
- rte_security_set_pkt_metadata(ss->security.ctx,
- ss->security.ses, mb[i], NULL);
- }
-}
-
-/*
- * process group of ESP outbound tunnel packets destined for
- * INLINE_CRYPTO type of device.
- */
-static uint16_t
-inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
- struct rte_mbuf *mb[], uint16_t num)
-{
- int32_t rc;
- uint32_t i, k, n;
- uint64_t sqn;
- rte_be64_t sqc;
- struct rte_ipsec_sa *sa;
- union sym_op_data icv;
- uint64_t iv[IPSEC_MAX_IV_QWORD];
- uint32_t dr[num];
-
- sa = ss->sa;
-
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
- rte_errno = EOVERFLOW;
-
- k = 0;
- for (i = 0; i != n; i++) {
-
- sqc = rte_cpu_to_be_64(sqn + i);
- gen_iv(iv, sqc);
-
- /* try to update the packet itself */
- rc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);
-
- k += (rc >= 0);
-
- /* failure, put packet into the death-row */
- if (rc < 0) {
- dr[i - k] = i;
- rte_errno = -rc;
- }
- }
-
- /* copy not processed mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
-
- inline_outb_mbuf_prepare(ss, mb, k);
- return k;
-}
-
-/*
- * process group of ESP outbound transport packets destined for
- * INLINE_CRYPTO type of device.
- */
-static uint16_t
-inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
- struct rte_mbuf *mb[], uint16_t num)
-{
- int32_t rc;
- uint32_t i, k, n, l2, l3;
- uint64_t sqn;
- rte_be64_t sqc;
- struct rte_ipsec_sa *sa;
- union sym_op_data icv;
- uint64_t iv[IPSEC_MAX_IV_QWORD];
- uint32_t dr[num];
-
- sa = ss->sa;
-
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
- rte_errno = EOVERFLOW;
-
- k = 0;
- for (i = 0; i != n; i++) {
-
- l2 = mb[i]->l2_len;
- l3 = mb[i]->l3_len;
-
- sqc = rte_cpu_to_be_64(sqn + i);
- gen_iv(iv, sqc);
-
- /* try to update the packet itself */
- rc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
- l2, l3, &icv);
-
- k += (rc >= 0);
-
- /* failure, put packet into the death-row */
- if (rc < 0) {
- dr[i - k] = i;
- rte_errno = -rc;
- }
- }
-
- /* copy not processed mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
-
- inline_outb_mbuf_prepare(ss, mb, k);
- return k;
-}
-
-/*
- * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
- * actual processing is done by HW/PMD, just set flags and metadata.
- */
-static uint16_t
-outb_inline_proto_process(const struct rte_ipsec_session *ss,
- struct rte_mbuf *mb[], uint16_t num)
-{
- inline_outb_mbuf_prepare(ss, mb, num);
- return num;
-}
-
/*
* Select packet processing function for session on LOOKASIDE_NONE
* type of device.
switch (sa->type & msk) {
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
- pf->prepare = inb_pkt_prepare;
- pf->process = inb_tun_pkt_process;
+ pf->prepare = esp_inb_pkt_prepare;
+ pf->process = esp_inb_tun_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
- pf->prepare = inb_pkt_prepare;
- pf->process = inb_trs_pkt_process;
+ pf->prepare = esp_inb_pkt_prepare;
+ pf->process = esp_inb_trs_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
- pf->prepare = outb_tun_prepare;
+ pf->prepare = esp_outb_tun_prepare;
pf->process = (sa->sqh_len != 0) ?
- outb_sqh_process : pkt_flag_process;
+ esp_outb_sqh_process : pkt_flag_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
- pf->prepare = outb_trs_prepare;
+ pf->prepare = esp_outb_trs_prepare;
pf->process = (sa->sqh_len != 0) ?
- outb_sqh_process : pkt_flag_process;
+ esp_outb_sqh_process : pkt_flag_process;
break;
default:
rc = -ENOTSUP;
switch (sa->type & msk) {
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
- pf->process = inb_tun_pkt_process;
+ pf->process = esp_inb_tun_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
- pf->process = inb_trs_pkt_process;
+ pf->process = esp_inb_trs_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
RTE_IPSEC_SATP_DIR_IB)
pf->process = pkt_flag_process;
else
- pf->process = outb_inline_proto_process;
+ pf->process = inline_proto_outb_pkt_process;
break;
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
pf->prepare = lksd_proto_prepare;
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf);
+/* inbound processing */
+
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+/* outbound processing */
+
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+ uint16_t num);
+
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
#endif /* _SA_H_ */