.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2018 Intel Corporation.
+ Copyright(c) 2018-2020 Intel Corporation.
IPsec Packet Processing Library
===============================
- verify that crypto device operations (encryption, ICV generation)
were completed successfully
+RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this mode the library functions perform the same operations as in
+``RTE_SECURITY_ACTION_TYPE_NONE``. The only difference is that the crypto
+operations are performed using the CPU crypto synchronous API.
+
+
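+For example, a minimal usage sketch for that mode (assuming ``ss`` is an
+already configured ``rte_ipsec_session`` of that type and ``mb`` holds
+``num`` packets to process):
+
+.. code-block:: c
+
+    uint16_t k, n;
+
+    /* prepare packets and perform crypto/auth synchronously on the CPU */
+    k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);
+
+    /* finalize processing for successfully prepared packets */
+    n = rte_ipsec_pkt_process(ss, mb, k);
+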
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
}
}
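+/*
+ * for given SA and inbound packet fill the per-packet IV and
+ * return offset (through *pofs) and length of the data to be
+ * processed by the CPU crypto (synchronous) API.
+ */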
+static inline uint32_t
+inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t *pofs, uint32_t plen, void *iv)
+{
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivp;
+ uint32_t clen;
+
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ *pofs + sizeof(struct rte_esp_hdr));
+ clen = 0;
+
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = (struct aead_gcm_iv *)iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ copy_iv(iv, ivp, sa->iv_len);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ ctr = (struct aesctr_cnt_blk *)iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+
+ *pofs += sa->ctp.auth.offset;
+ clen = plen - sa->ctp.auth.length;
+ return clen;
+}
+
/*
* Helper function for prepare() to deal with situation when
* ICV is spread by two segments. Tries to move ICV completely into the
}
}
-/*
- * setup/update packet data and metadata for ESP inbound tunnel case.
- */
-static inline int32_t
-inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
- struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
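+/*
+ * retrieve and reconstruct SQN value from the inbound packet,
+ * check it against the replay window.
+ */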
+static inline int
+inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+ struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
{
int32_t rc;
uint64_t sqn;
- uint32_t clen, icv_len, icv_ofs, plen;
- struct rte_mbuf *ml;
struct rte_esp_hdr *esph;
esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
sqn = rte_be_to_cpu_32(esph->seq);
if (IS_ESN(sa))
sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+ *sqc = rte_cpu_to_be_64(sqn);
+ /* check IPsec window */
rc = esn_inb_check_sqn(rsn, sa, sqn);
- if (rc != 0)
- return rc;
- sqn = rte_cpu_to_be_64(sqn);
+ return rc;
+}
+
+/* prepare packet for upcoming processing */
+static inline int32_t
+inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t hlen, union sym_op_data *icv)
+{
+ uint32_t clen, icv_len, icv_ofs, plen;
+ struct rte_mbuf *ml;
/* start packet manipulation */
plen = mb->pkt_len;
icv_ofs += sa->sqh_len;
- /* we have to allocate space for AAD somewhere,
+ /*
+ * we have to allocate space for AAD somewhere,
* right now - just use free trailing space at the last segment.
* Would probably be more convenient to reserve space for AAD
* inside rte_crypto_op itself
mb->pkt_len += sa->sqh_len;
ml->data_len += sa->sqh_len;
- inb_pkt_xprepare(sa, sqn, icv);
return plen;
}
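+/*
+ * setup/update packet data and metadata for ESP inbound case.
+ */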
+static inline int32_t
+inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+ struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+{
+ int rc;
+ rte_be64_t sqn;
+
+ rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
+ if (rc != 0)
+ return rc;
+
+ rc = inb_prepare(sa, mb, hlen, icv);
+ if (rc < 0)
+ return rc;
+
+ inb_pkt_xprepare(sa, sqn, icv);
+ return rc;
+}
+
/*
* setup/update packets and crypto ops for ESP inbound case.
*/
lksd_none_cop_prepare(cop[k], cs, mb[i]);
inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
k++;
- } else
+ } else {
dr[i - k] = i;
+ rte_errno = -rc;
+ }
}
rsn_release(sa, rsn);
/* copy not prepared mbufs beyond good ones */
- if (k != num && k != 0) {
+ if (k != num && k != 0)
move_bad_mbufs(mb, dr, num, num - k);
- rte_errno = EBADMSG;
- }
return k;
}
return k;
}
-
/*
* *process* function for tunnel packets
*/
if (k != num && k != 0)
move_bad_mbufs(mb, dr, num, num - k);
- /* update SQN and replay winow */
+ /* update SQN and replay window */
n = esp_inb_rsn_update(sa, sqn, dr, k);
/* handle mbufs with wrong SQN */
return n;
}
+/*
+ * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
+ * (synchronous mode).
+ */
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ int32_t rc;
+ uint32_t i, k;
+ struct rte_ipsec_sa *sa;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ void *iv[num];
+ void *aad[num];
+ void *dgst[num];
+ uint32_t dr[num];
+ uint32_t l4ofs[num];
+ uint32_t clen[num];
+ uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+ sa = ss->sa;
+
+ /* grab rsn lock */
+ rsn = rsn_acquire(sa);
+
+ /* do preparation for all packets */
+ for (i = 0, k = 0; i != num; i++) {
+
+ /* calculate ESP header offset */
+ l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
+
+ /* prepare ESP packet for processing */
+ rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
+ if (rc >= 0) {
+ /* get encrypted data offset and length */
+ clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
+ l4ofs + k, rc, ivbuf[k]);
+
+ /* fill iv, digest and aad */
+ iv[k] = ivbuf[k];
+ aad[k] = icv.va + sa->icv_len;
+ dgst[k++] = icv.va;
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* release rsn lock */
+ rsn_release(sa, rsn);
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
+
+ /* convert mbufs to iovecs and do actual crypto/auth processing */
+ cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, l4ofs, clen, k);
+ return k;
+}
+
/*
* process group of ESP inbound tunnel packets.
*/
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
#include "misc.h"
#include "pad.h"
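+/* per-mode (tunnel/transport) outbound packet preparation routine */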
+typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+ union sym_op_data *icv, uint8_t sqh_len);
/*
* helper function to fill crypto_sym op for cipher+auth algorithms.
espt->pad_len = pdlen;
espt->next_proto = sa->proto;
+ /* set icv va/pa value(s) */
icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- uint32_t l2len, uint32_t l3len, union sym_op_data *icv,
- uint8_t sqh_len)
+ union sym_op_data *icv, uint8_t sqh_len)
{
uint8_t np;
uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
struct rte_esp_tail *espt;
char *ph, *pt;
uint64_t *iv;
+ uint32_t l2len, l3len;
+
+ l2len = mb->l2_len;
+ l3len = mb->l3_len;
uhlen = l2len + l3len;
plen = mb->pkt_len - uhlen;
espt->pad_len = pdlen;
espt->next_proto = np;
+ /* set icv va/pa value(s) */
icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
gen_iv(iv, sqc);
/* try to update the packet itself */
- rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv,
- sa->sqh_len);
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
+ sa->sqh_len);
/* success, setup crypto op */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
return k;
}
+
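+/*
+ * fill the per-packet IV and return offset (through *pofs) and length
+ * of the data to be processed by the CPU crypto (synchronous) API.
+ */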
+static inline uint32_t
+outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
+ uint32_t plen, void *iv)
+{
+ uint64_t *ivp = iv;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint32_t clen;
+
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ ctr = iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+
+ *pofs += sa->ctp.auth.offset;
+ clen = plen + sa->ctp.auth.length;
+ return clen;
+}
+
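+/*
+ * Prepare (plus actual crypto/auth) routine for outbound CPU-CRYPTO
+ * (synchronous mode).
+ */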
+static uint16_t
+cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num,
+ esp_outb_prepare_t prepare, uint32_t cofs_mask)
+{
+ int32_t rc;
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ uint32_t i, k, n;
+ uint32_t l2, l3;
+ union sym_op_data icv;
+ void *iv[num];
+ void *aad[num];
+ void *dgst[num];
+ uint32_t dr[num];
+ uint32_t l4ofs[num];
+ uint32_t clen[num];
+ uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+ sa = ss->sa;
+
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ for (i = 0, k = 0; i != n; i++) {
+
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ /* calculate ESP header offset */
+ l4ofs[k] = (l2 + l3) & cofs_mask;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(ivbuf[k], sqc);
+
+ /* try to update the packet itself */
+ rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+
+ /* success, proceed with preparations */
+ if (rc >= 0) {
+
+ outb_pkt_xprepare(sa, sqc, &icv);
+
+ /* get encrypted data offset and length */
+ clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
+ ivbuf[k]);
+
+ /* fill iv, digest and aad */
+ iv[k] = ivbuf[k];
+ aad[k] = icv.va + sa->icv_len;
+ dgst[k++] = icv.va;
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ /* convert mbufs to iovecs and do actual crypto/auth processing */
+ cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, l4ofs, clen, k);
+ return k;
+}
+
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
+}
+
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
+ UINT32_MAX);
+}
+
/*
* process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
- uint32_t i, k, n, l2, l3;
+ uint32_t i, k, n;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
k = 0;
for (i = 0; i != n; i++) {
- l2 = mb[i]->l2_len;
- l3 = mb[i]->l3_len;
-
sqc = rte_cpu_to_be_64(sqn + i);
gen_iv(iv, sqc);
/* try to update the packet itself */
- rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
- l2, l3, &icv, 0);
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
k += (rc >= 0);
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _MISC_H_
mb->pkt_len -= len;
}
+/*
+ * process packets using sync crypto engine
+ */
+static inline void
+cpu_crypto_bulk(const struct rte_ipsec_session *ss,
+ union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
+ void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+ uint32_t clen[], uint32_t num)
+{
+ uint32_t i, j, n;
+ int32_t vcnt, vofs;
+ int32_t st[num];
+ struct rte_crypto_sgl vecpkt[num];
+ struct rte_crypto_vec vec[UINT8_MAX];
+ struct rte_crypto_sym_vec symvec;
+
+ const uint32_t vnum = RTE_DIM(vec);
+
+	j = 0;
+	n = 0;
+ vofs = 0;
+ for (i = 0; i != num; i++) {
+
+ vcnt = rte_crypto_mbuf_to_vec(mb[i], l4ofs[i], clen[i],
+ &vec[vofs], vnum - vofs);
+
+ /* not enough space in vec[] to hold all segments */
+ if (vcnt < 0) {
+ /* fill the request structure */
+ symvec.sgl = &vecpkt[j];
+ symvec.iv = &iv[j];
+ symvec.aad = &aad[j];
+ symvec.digest = &dgst[j];
+ symvec.status = &st[j];
+ symvec.num = i - j;
+
+ /* flush vec array and try again */
+ n += rte_cryptodev_sym_cpu_crypto_process(
+ ss->crypto.dev_id, ss->crypto.ses, ofs,
+ &symvec);
+ vofs = 0;
+ vcnt = rte_crypto_mbuf_to_vec(mb[i], l4ofs[i], clen[i],
+ vec, vnum);
+ RTE_ASSERT(vcnt > 0);
+ j = i;
+ }
+
+ vecpkt[i].vec = &vec[vofs];
+ vecpkt[i].num = vcnt;
+ vofs += vcnt;
+ }
+
+ /* fill the request structure */
+ symvec.sgl = &vecpkt[j];
+ symvec.iv = &iv[j];
+ symvec.aad = &aad[j];
+ symvec.digest = &dgst[j];
+ symvec.status = &st[j];
+ symvec.num = i - j;
+
+ n += rte_cryptodev_sym_cpu_crypto_process(ss->crypto.dev_id,
+ ss->crypto.ses, ofs, &symvec);
+
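+	/* mark packets that failed crypto/auth processing */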
+ j = num - n;
+ for (i = 0; j != 0 && i != num; i++) {
+ if (st[i] != 0) {
+ mb[i]->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ j--;
+ }
+ }
+}
+
#endif /* _MISC_H_ */
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _RTE_IPSEC_H_
* (see rte_ipsec_pkt_process for more details).
*/
struct rte_ipsec_sa_pkt_func {
- uint16_t (*prepare)(const struct rte_ipsec_session *ss,
+ union {
+ uint16_t (*async)(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[],
struct rte_crypto_op *cop[],
uint16_t num);
+ uint16_t (*sync)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ uint16_t num);
+ } prepare;
uint16_t (*process)(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[],
uint16_t num);
union {
struct {
struct rte_cryptodev_sym_session *ses;
+ uint8_t dev_id;
} crypto;
struct {
struct rte_security_session *ses;
rte_ipsec_pkt_crypto_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
- return ss->pkt_func.prepare(ss, mb, cop, num);
+ return ss->pkt_func.prepare.async(ss, mb, cop, num);
+}
+
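+/**
+ * Same as rte_ipsec_pkt_crypto_prepare(), but for the CPU crypto
+ * (synchronous) case: prepares the packets and performs the actual
+ * crypto/auth processing in place, no crypto-ops are involved.
+ * @param ss
+ *   Pointer to the *rte_ipsec_session* object the packets belong to.
+ * @param mb
+ *   The address of an array of *num* pointers to *rte_mbuf* structures.
+ * @param num
+ *   The maximum number of packets to process.
+ * @return
+ *   Number of successfully processed packets, with error code set in rte_errno.
+ */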
+__rte_experimental
+static inline uint16_t
+rte_ipsec_pkt_cpu_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return ss->pkt_func.prepare.sync(ss, mb, num);
}
/**
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
esp_inb_init(struct rte_ipsec_sa *sa)
{
/* these params may differ with new algorithms support */
- sa->ctp.auth.offset = 0;
- sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+
+ /*
+ * for AEAD and NULL algorithms we can assume that
+ * auth and cipher offsets would be equal.
+ */
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ case ALGO_TYPE_NULL:
+ sa->ctp.auth.raw = sa->ctp.cipher.raw;
+ break;
+ default:
+ sa->ctp.auth.offset = 0;
+ sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+ sa->cofs.ofs.cipher.tail = sa->sqh_len;
+ break;
+ }
+
+ sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
}
/*
sa->sqn.outb.raw = 1;
- /* these params may differ with new algorithms support */
- sa->ctp.auth.offset = hlen;
- sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
- sa->iv_len + sa->sqh_len;
-
algo_type = sa->algo_type;
+ /*
+ * Setup auth and cipher length and offset.
+	 * These params may differ with new algorithms support.
+ */
+
switch (algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_AES_CTR:
break;
case ALGO_TYPE_AES_CBC:
case ALGO_TYPE_3DES_CBC:
- sa->ctp.cipher.offset = sa->hdr_len +
- sizeof(struct rte_esp_hdr);
+ sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
sa->ctp.cipher.length = sa->iv_len;
break;
}
+
+ /*
+ * for AEAD and NULL algorithms we can assume that
+ * auth and cipher offsets would be equal.
+ */
+ switch (algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ case ALGO_TYPE_NULL:
+ sa->ctp.auth.raw = sa->ctp.cipher.raw;
+ break;
+ default:
+ sa->ctp.auth.offset = hlen;
+ sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
+ sa->iv_len + sa->sqh_len;
+ break;
+ }
+
+ sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
+ sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
+ (sa->ctp.cipher.offset + sa->ctp.cipher.length);
}
/*
* - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
* - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
*/
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
{
uint32_t i, k;
uint32_t dr[num];
switch (sa->type & msk) {
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
- pf->prepare = esp_inb_pkt_prepare;
+ pf->prepare.async = esp_inb_pkt_prepare;
pf->process = esp_inb_tun_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
- pf->prepare = esp_inb_pkt_prepare;
+ pf->prepare.async = esp_inb_pkt_prepare;
pf->process = esp_inb_trs_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
- pf->prepare = esp_outb_tun_prepare;
+ pf->prepare.async = esp_outb_tun_prepare;
pf->process = (sa->sqh_len != 0) ?
esp_outb_sqh_process : pkt_flag_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
- pf->prepare = esp_outb_trs_prepare;
+ pf->prepare.async = esp_outb_trs_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
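+/*
+ * Select packet processing function for session on CPU_CRYPTO
+ * (synchronous crypto) device.
+ */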
+static int
+cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare.sync = cpu_inb_pkt_prepare;
+ pf->process = esp_inb_tun_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare.sync = cpu_inb_pkt_prepare;
+ pf->process = esp_inb_trs_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->prepare.sync = cpu_outb_tun_pkt_prepare;
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_sqh_process : pkt_flag_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->prepare.sync = cpu_outb_trs_pkt_prepare;
pf->process = (sa->sqh_len != 0) ?
esp_outb_sqh_process : pkt_flag_process;
break;
int32_t rc;
rc = 0;
- pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
+ pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };
switch (ss->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
pf->process = inline_proto_outb_pkt_process;
break;
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
- pf->prepare = lksd_proto_prepare;
+ pf->prepare.async = lksd_proto_prepare;
pf->process = pkt_flag_process;
break;
+ case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+ rc = cpu_crypto_pkt_func_select(sa, pf);
+ break;
default:
rc = -ENOTSUP;
}
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _SA_H_
union sym_op_ofslen cipher;
union sym_op_ofslen auth;
} ctp;
+ /* cpu-crypto offsets */
+ union rte_crypto_sym_ofs cofs;
/* tx_offload template for tunnel mbuf */
struct {
uint64_t msk;
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
/* outbound processing */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num);
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
#endif /* _SA_H_ */
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
if (ss == NULL || ss->sa == NULL)
return -EINVAL;
- if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
if (ss->crypto.ses == NULL)
return -EINVAL;
} else {