*/
#include <rte_cryptodev.h>
+#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
+#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_security.h>
#include <rte_security_driver.h>
+#include <rte_udp.h>
+#include "otx2_common.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_ipsec_fp.h"
#include "otx2_sec_idev.h"
+#include "otx2_security.h"
-#define ETH_SEC_MAX_PKT_LEN 1450
+#define ERR_STR_SZ 256
struct eth_sec_tag_const {
RTE_STD_C11
}
};
+static void
+lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
+{
+	static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
+	uint16_t port = eth_dev->data->port_id;
+	const struct rte_memzone *mz;
+	uint64_t **sa_tbl;
+
+	/* Fastpath lookup memzone is created lazily; nothing to clear
+	 * if it was never set up.
+	 */
+	mz = rte_memzone_lookup(name);
+	if (mz == NULL)
+		return;
+
+	/* Per-port SA pointer table lives at a fixed offset in the zone */
+	sa_tbl = (uint64_t **)RTE_PTR_ADD((uint8_t *)mz->addr,
+					  OTX2_NIX_SA_TBL_START);
+	if (sa_tbl[port] == NULL)
+		return;
+
+	rte_free(sa_tbl[port]);
+	sa_tbl[port] = NULL;
+}
+
+/* Publish the SA pointer for @spi in this port's fastpath lookup table,
+ * allocating the per-port table on first use.
+ *
+ * On failure a human-readable reason is written to @err_str (caller
+ * provides an ERR_STR_SZ buffer) and a negative errno is returned.
+ */
+static int
+lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa,
+			   char *err_str)
+{
+	static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint16_t port = eth_dev->data->port_id;
+	const struct rte_memzone *mz;
+	uint64_t **sa_tbl;
+	uint8_t *mem;
+
+	mz = rte_memzone_lookup(name);
+	if (mz == NULL) {
+		snprintf(err_str, ERR_STR_SZ,
+			 "Could not find fastpath lookup table");
+		return -EINVAL;
+	}
+
+	mem = mz->addr;
+
+	sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
+
+	/* Allocate the per-port SPI -> SA table on first use. The original
+	 * code dereferenced the table without checking the allocation.
+	 */
+	if (sa_tbl[port] == NULL) {
+		sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
+					  sizeof(uint64_t), 0);
+		if (sa_tbl[port] == NULL) {
+			snprintf(err_str, ERR_STR_SZ,
+				 "Could not allocate fastpath SA table");
+			return -ENOMEM;
+		}
+	}
+
+	sa_tbl[port][spi] = (uint64_t)sa;
+
+	return 0;
+}
+
static inline void
in_sa_mz_name_get(char *name, int size, uint16_t port)
{
return sa + sa_index;
}
+/* Precompute the per-packet length constants for an IPsec SA:
+ * partial_len (fixed overhead added to each packet), roundup_len and
+ * roundup_byte (padding/alignment of the ESP payload).
+ */
+static int
+ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
+		   struct rte_crypto_sym_xform *xform,
+		   struct otx2_sec_session_ipsec_ip *sess)
+{
+	struct rte_crypto_sym_xform *cipher, *auth;
+
+	/* Outer IPv4 header is always accounted for */
+	sess->partial_len = sizeof(struct rte_ipv4_hdr);
+
+	switch (ipsec->proto) {
+	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
+		sess->partial_len += sizeof(struct rte_esp_hdr);
+		sess->roundup_len = sizeof(struct rte_esp_tail);
+		break;
+	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
+		sess->partial_len += OTX2_SEC_AH_HDR_LEN;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ipsec->options.udp_encap)
+		sess->partial_len += sizeof(struct rte_udp_hdr);
+
+	/* AEAD: only AES-GCM contributes IV/ICV overhead here */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+			sess->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
+			sess->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
+			sess->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
+		}
+		return 0;
+	}
+
+	/* Chained xforms: egress chains cipher->auth, ingress auth->cipher */
+	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		cipher = xform;
+		auth = xform->next;
+	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		auth = xform;
+		cipher = xform->next;
+	} else {
+		return -EINVAL;
+	}
+
+	if (cipher->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC)
+		return -EINVAL;
+
+	sess->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
+	sess->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
+
+	if (auth->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC)
+		return -EINVAL;
+
+	sess->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
+
+	return 0;
+}
+
static int
hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
const uint8_t *auth_key, int len, uint8_t *hmac_key)
timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();
- rte_cio_wmb();
+ rte_io_wmb();
do {
otx2_lmt_mov(qp->lmtline, &inst, 2);
struct otx2_ipsec_fp_sa_ctl *ctl;
struct otx2_ipsec_fp_out_sa *sa;
struct otx2_sec_session *priv;
+ struct otx2_cpt_inst_s inst;
struct otx2_cpt_qp *qp;
priv = get_sec_session_private_data(sec_sess);
+ priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
sess = &priv->ipsec.ip;
sa = &sess->out_sa;
memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
+ sess->seq = 1;
+
+ ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
+ if (ret < 0)
+ return ret;
+
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
memcpy(sa->nonce, &ipsec->salt, 4);
}
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ /* Start ip id from 1 */
+ sess->ip_id = 1;
+
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
sizeof(struct in_addr));
else
return -EINVAL;
+ /* Determine word 7 of CPT instruction */
+ inst.u64[7] = 0;
+ inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
+ inst.cptr = rte_mempool_virt2iova(sa);
+ sess->inst_w7 = inst.u64[7];
+
/* Get CPT QP to be used for this SA */
ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
if (ret)
goto cpt_put;
}
+ rte_io_wmb();
+ ctl->valid = 1;
+
return 0;
cpt_put:
otx2_sec_idev_tx_cpt_qp_put(sess->qp);
struct otx2_ipsec_fp_sa_ctl *ctl;
struct otx2_ipsec_fp_in_sa *sa;
struct otx2_sec_session *priv;
+ char err_str[ERR_STR_SZ];
struct otx2_cpt_qp *qp;
+ memset(err_str, 0, ERR_STR_SZ);
+
if (ipsec->spi >= dev->ipsec_in_max_spi) {
otx2_err("SPI exceeds max supported");
return -EINVAL;
}
sa = in_sa_get(port, ipsec->spi);
+ if (sa == NULL)
+ return -ENOMEM;
+
ctl = &sa->ctl;
priv = get_sec_session_private_data(sec_sess);
+ priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
sess = &priv->ipsec.ip;
+ rte_spinlock_lock(&dev->ipsec_tbl_lock);
+
if (ctl->valid) {
- otx2_err("SA already registered");
- return -EINVAL;
+ snprintf(err_str, ERR_STR_SZ, "SA already registered");
+ ret = -EEXIST;
+ goto tbl_unlock;
}
memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
auth_key_len = auth_xform->auth.key.length;
}
- if (cipher_key_len != 0)
+ if (cipher_key_len != 0) {
memcpy(sa->cipher_key, cipher_key, cipher_key_len);
- else
- return -EINVAL;
+ } else {
+ snprintf(err_str, ERR_STR_SZ, "Invalid cipher key len");
+ ret = -EINVAL;
+ goto sa_clear;
+ }
sess->in_sa = sa;
sa->userdata = priv->userdata;
+ sa->replay_win_sz = ipsec->replay_win_sz;
+
+ if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa, err_str)) {
+ ret = -EINVAL;
+ goto sa_clear;
+ }
+
ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
- if (ret)
- return ret;
+ if (ret) {
+ snprintf(err_str, ERR_STR_SZ,
+ "Could not set SA CTL word (err: %d)", ret);
+ goto sa_clear;
+ }
if (auth_key_len && auth_key) {
/* Get a queue pair for HMAC init */
ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
- if (ret)
- return ret;
+ if (ret) {
+ snprintf(err_str, ERR_STR_SZ, "Could not get CPT QP");
+ goto sa_clear;
+ }
+
ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
otx2_sec_idev_tx_cpt_qp_put(qp);
+ if (ret) {
+ snprintf(err_str, ERR_STR_SZ, "Could not put CPT QP");
+ goto sa_clear;
+ }
+ }
+
+ if (sa->replay_win_sz) {
+ if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
+ snprintf(err_str, ERR_STR_SZ,
+ "Replay window size is not supported");
+ ret = -ENOTSUP;
+ goto sa_clear;
+ }
+ sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
+ 0);
+ if (sa->replay == NULL) {
+ snprintf(err_str, ERR_STR_SZ,
+ "Could not allocate memory");
+ ret = -ENOMEM;
+ goto sa_clear;
+ }
+
+ rte_spinlock_init(&sa->replay->lock);
+ /*
+ * Set window bottom to 1, base and top to size of
+ * window
+ */
+ sa->replay->winb = 1;
+ sa->replay->wint = sa->replay_win_sz;
+ sa->replay->base = sa->replay_win_sz;
+ sa->esn_low = 0;
+ sa->esn_hi = 0;
}
+
+ rte_io_wmb();
+ ctl->valid = 1;
+
+ rte_spinlock_unlock(&dev->ipsec_tbl_lock);
+ return 0;
+
+sa_clear:
+ memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
+
+tbl_unlock:
+ rte_spinlock_unlock(&dev->ipsec_tbl_lock);
+
+ otx2_err("%s", err_str);
+
return ret;
}
return ret;
}
+/* Free the anti-replay window state attached to an inbound SA, if any */
+static void
+otx2_eth_sec_free_anti_replay(struct otx2_ipsec_fp_in_sa *sa)
+{
+	if (sa != NULL && sa->replay_win_sz && sa->replay != NULL)
+		rte_free(sa->replay);
+}
+
static int
-otx2_eth_sec_session_destroy(void *device __rte_unused,
+otx2_eth_sec_session_destroy(void *device,
struct rte_security_session *sess)
{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(device);
struct otx2_sec_session_ipsec_ip *sess_ip;
+ struct otx2_ipsec_fp_in_sa *sa;
struct otx2_sec_session *priv;
struct rte_mempool *sess_mp;
int ret;
sess_ip = &priv->ipsec.ip;
+ if (priv->ipsec.dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ rte_spinlock_lock(&dev->ipsec_tbl_lock);
+ sa = sess_ip->in_sa;
+
+ /* Release the anti replay window */
+ otx2_eth_sec_free_anti_replay(sa);
+
+ /* Clear SA table entry */
+ if (sa != NULL) {
+ sa->ctl.valid = 0;
+ rte_io_wmb();
+ }
+
+ rte_spinlock_unlock(&dev->ipsec_tbl_lock);
+ }
+
/* Release CPT LF used for this session */
if (sess_ip->qp != NULL) {
ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
struct rte_mbuf *m, void *params __rte_unused)
{
/* Set security session as the pkt metadata */
- m->udata64 = (uint64_t)session;
+ *rte_security_dynfield(m) = (rte_security_dynfield_t)session;
return 0;
}
req->ipsec_cfg0.sa_pow2_size =
rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
- req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
+ req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;
req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
return otx2_mbox_process(mbox);
}
+/* Re-program the inline-IPsec tag type from the SSO tag type currently
+ * configured on RQ:0, so security and ethdev event configs agree.
+ *
+ * Returns 0 on success, negative errno on mbox or config failure.
+ */
+int
+otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_mbox *mbox = dev->mbox;
+	struct nix_aq_enq_rsp *rsp;
+	struct nix_aq_enq_req *aq;
+	int ret;
+
+	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+	/* Mbox message allocation can fail when the queue is full; the
+	 * original code dereferenced the result unchecked.
+	 */
+	if (aq == NULL) {
+		otx2_err("Could not allocate mbox message");
+		return -ENOMEM;
+	}
+
+	aq->qidx = 0; /* Read RQ:0 context */
+	aq->ctype = NIX_AQ_CTYPE_RQ;
+	aq->op = NIX_AQ_INSTOP_READ;
+
+	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (ret < 0) {
+		otx2_err("Could not read RQ context");
+		return ret;
+	}
+
+	/* Update tag type */
+	ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
+	if (ret < 0)
+		otx2_err("Could not update sec eth tag type");
+
+	return ret;
+}
+
int
otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
{
!(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
return 0;
+ if (rte_security_dynfield_register() < 0)
+ return -rte_errno;
+
nb_sa = dev->ipsec_in_max_spi;
mz_sz = nb_sa * sa_width;
in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
goto sec_fini;
}
+ rte_spinlock_init(&dev->ipsec_tbl_lock);
+
return 0;
sec_fini:
!(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
return;
+ lookup_mem_sa_tbl_clear(eth_dev);
+
in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
rte_memzone_free(rte_memzone_lookup(name));
}