#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
+#include <rte_ethdev.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
+#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
+#include "otx2_sec_idev.h"
+#include "otx2_security.h"
#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
meta_info->sg_mlen = 0;
}
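+/*
+ * Bind the queue pair to a security-capable ethdev picked round-robin
+ * from the available ports (the shared atomic offset spreads QPs across
+ * ports) and publish it as an inline Tx QP to ethdev security.
+ */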
+static int
+otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+ static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
+ uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
+ int i, ret;
+
+ for (i = 0; i < nb_ethport; i++) {
+ port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
+ if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+ break;
+ }
+
+ if (i >= nb_ethport)
+ return 0;
+
+ ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
+ if (ret)
+ return ret;
+
+ /* Publish inline Tx QP to eth dev security */
+ ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
uint8_t group)
qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);
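+ /* Detach the QP from any earlier inline configuration; -ENOENT means none */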
+ ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+ if (ret && (ret != -ENOENT)) {
+ CPT_LOG_ERR("Could not delete inline configuration");
+ goto mempool_destroy;
+ }
+
otx2_cpt_iq_disable(qp);
+ ret = otx2_cpt_qp_inline_cfg(dev, qp);
+ if (ret) {
+ CPT_LOG_ERR("Could not configure queue for inline IPsec");
+ goto mempool_destroy;
+ }
+
ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
size_div40);
if (ret) {
char name[RTE_MEMZONE_NAMESIZE];
int ret;
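+ /* Unbind the QP from inline IPsec before releasing it */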
+ ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+ if (ret && (ret != -ENOENT)) {
+ CPT_LOG_ERR("Could not delete inline configuration");
+ return ret;
+ }
+
otx2_cpt_iq_disable(qp);
otx2_cpt_metabuf_mempool_destroy(qp);
return 0;
}
+static int
+sym_xform_verify(struct rte_crypto_sym_xform *xform)
+{
+ if (xform->next) {
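+ /* Auth followed by cipher-encrypt chains are not supported */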
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ return -ENOTSUP;
+
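+ /* Cipher-decrypt followed by auth chains are not supported */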
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -ENOTSUP;
+
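+ /* 3DES-CBC chained with SHA1 is not supported in either order */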
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+ return -ENOTSUP;
+
+ } else {
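+ /* A standalone NULL auth verify xform is not supported */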
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
void *priv;
int ret;
- if (unlikely(cpt_is_algo_supported(xform))) {
- CPT_LOG_ERR("Crypto xform not supported");
- return -ENOTSUP;
- }
+ ret = sym_xform_verify(xform);
+ if (unlikely(ret))
+ return ret;
if (unlikely(rte_mempool_get(pool, &priv))) {
CPT_LOG_ERR("Could not allocate session private data");
return -ENOMEM;
}
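+ /* Clear the session private data up to the fctx member of cpt_ctx */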
+ memset(priv, 0, sizeof(struct cpt_sess_misc) +
+ offsetof(struct cpt_ctx, fctx));
+
misc = priv;
for ( ; xform != NULL; xform = xform->next) {
/*
* IE engines support IPsec operations
- * SE engines support IPsec operations and Air-Crypto operations
+ * SE engines support IPsec operations, Chacha-Poly and
+ * Air-Crypto operations
*/
- if (misc->zsk_flag)
+ if (misc->zsk_flag || misc->chacha_poly)
misc->egrp = OTX2_CPT_EGRP_SE;
else
misc->egrp = OTX2_CPT_EGRP_SE_IE;
priv_put:
rte_mempool_put(pool, priv);
- CPT_LOG_ERR("Crypto xform not supported");
return -ENOTSUP;
}
rte_mempool_put(pool, priv);
}
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
struct cpt_request_info *req)
* buffer immediately, a DMB is not required to push out
* LMTSTs.
*/
- rte_cio_wmb();
+ rte_io_wmb();
lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
} while (lmt_status == 0);
return 0;
}
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
struct rte_crypto_op *op,
struct pending_queue *pend_q)
if (unlikely(ret))
goto req_fail;
break;
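+ /* EC point multiplication */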
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
+ sess->ec_ctx.curveid);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
default:
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
ret = -EINVAL;
return ret;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct pending_queue *pend_q)
{
return ret;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
+otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
+ struct pending_queue *pend_q)
+{
+ struct otx2_sec_session_ipsec_lp *sess;
+ struct otx2_ipsec_po_sa_ctl *ctl_wrd;
+ struct otx2_sec_session *priv;
+ struct cpt_request_info *req;
+ int ret;
+
+ priv = get_sec_session_private_data(op->sym->sec_session);
+ sess = &priv->ipsec.lp;
+
+ ctl_wrd = &sess->in_sa.ctl;
+
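+ /* SA direction in the control word selects outbound vs inbound processing */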
+ if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
+ ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
+ else
+ ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
+
+ if (unlikely(ret)) {
+ otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
+ return ret;
+ }
+
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+
+ return ret;
+}
+
+static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct pending_queue *pend_q)
{
for (count = 0; count < nb_ops; count++) {
op = ops[count];
if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
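+ /* Security session ops take the inline IPsec path */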
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+ ret = otx2_cpt_enqueue_sec(qp, op, pend_q);
+ else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
else
ret = otx2_cpt_enqueue_sym_sessless(qp, op,
ecdsa->s.length = prime_len;
}
+static __rte_always_inline void
+otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
+ struct cpt_request_info *req,
+ struct cpt_asym_ec_ctx *ec)
+{
+ int prime_len = ec_grp[ec->curveid].prime.length;
+
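+ /* Result point: x at rptr, y at the next 8-byte aligned offset */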
+ memcpy(ecpm->r.x.data, req->rptr, prime_len);
+ memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ ecpm->r.x.length = prime_len;
+ ecpm->r.y.length = prime_len;
+}
+
static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
struct cpt_request_info *req)
case RTE_CRYPTO_ASYM_XFORM_ECDSA:
otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
break;
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
+ break;
default:
CPT_LOG_DP_DEBUG("Invalid crypto xform type");
cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
}
}
+static void
+otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
+{
+ struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
+ vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct rte_mbuf *m = sym_op->m_src;
+ struct rte_ipv4_hdr *ip;
+ uint16_t m_len;
+ int mdata_len;
+ char *data;
+
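+ /* Trim the request metadata from the tail of the mbuf */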
+ mdata_len = (int)rsp[3];
+ rte_pktmbuf_trim(m, mdata_len);
+
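+ /* For inbound (per the major opcode), strip the RPTR header and recover the packet length from the inner IP header */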
+ if ((word0->s.opcode & 0xff) == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
+ data = rte_pktmbuf_mtod(m, char *);
+ ip = (struct rte_ipv4_hdr *)(data + OTX2_IPSEC_PO_INB_RPTR_HDR);
+
+ m_len = rte_be_to_cpu_16(ip->total_length);
+
+ m->data_len = m_len;
+ m->pkt_len = m_len;
+ m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
+ }
+}
+
static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
uintptr_t *rsp, uint8_t cc)
{
if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
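+ /* Security (IPsec) ops are post-processed separately */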
+ if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
+ otx2_cpt_sec_post_process(cop, rsp);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return;
+ }
+
if (likely(cc == NO_ERR)) {
/* Verify authentication data if required */
if (unlikely(rsp[2]))
goto queues_detach;
}
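+ /* Set up the device for inline IPsec */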
+ ret = otx2_cpt_inline_init(dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not enable inline IPsec");
+ goto intr_unregister;
+ }
+
dev->enqueue_burst = otx2_cpt_enqueue_burst;
dev->dequeue_burst = otx2_cpt_dequeue_burst;
rte_mb();
return 0;
+intr_unregister:
+ otx2_cpt_err_intr_unregister(dev);
queues_detach:
otx2_cpt_queues_detach(dev);
return ret;
.stats_reset = NULL,
.queue_pair_setup = otx2_cpt_queue_pair_setup,
.queue_pair_release = otx2_cpt_queue_pair_release,
- .queue_pair_count = NULL,
/* Symmetric crypto ops */
.sym_session_get_size = otx2_cpt_sym_session_get_size,