1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
11 #include "cnxk_cryptodev.h"
12 #include "cnxk_cryptodev_ops.h"
13 #include "cnxk_cryptodev_capabilities.h"
/* Compute the per-operation metadata buffer ("metabuf") length in bytes.
 * NOTE(review): this listing is elided — the return type, local
 * declarations, the final alignment argument and the return statement
 * are not visible here. */
17 cnxk_cpt_get_mlen(void)
/* Two 64-bit words — presumably the result/completion area; TODO
 * confirm against the full source. */
22 len = 2 * sizeof(uint64_t);
/* Room for the largest MAC the SE engine can produce. */
23 len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
/* Offset-control word plus an AES-CBC-sized IV. */
25 len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
/* Scatter-gather list: header plus SG entries. The in/out entry count
 * is rounded up to a multiple of 4, and 4 entries share one SG block
 * (hence the >> 2). The outer RTE_ALIGN_CEIL's second argument is
 * elided from this listing. */
26 len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
27 (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
28 2) * ROC_SE_SG_ENTRY_SIZE),
/* dev_configure op: apply the application's configuration to the CPT VF.
 * NOTE(review): elided listing — the error-return branches and the
 * trailing return statement are not visible here. */
35 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
36 struct rte_cryptodev_config *conf)
38 struct cnxk_cpt_vf *vf = dev->data->dev_private;
39 struct roc_cpt *roc_cpt = &vf->cpt;
40 uint16_t nb_lf_avail, nb_lf;
/* Honor the feature flags the application asked to disable. */
43 dev->feature_flags &= ~conf->ff_disable;
45 nb_lf_avail = roc_cpt->nb_lf_avail;
46 nb_lf = conf->nb_queue_pairs;
/* One hardware LF backs each queue pair; the request cannot exceed
 * what the device exposes (failure branch body is elided). */
48 if (nb_lf > nb_lf_avail)
51 ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
/* Failure path: log only line visible; cleanup/return elided. */
53 plt_err("Could not configure device");
/* dev_start op. NOTE(review): body entirely elided in this listing. */
61 cnxk_cpt_dev_start(struct rte_cryptodev *dev)
/* dev_stop op. NOTE(review): body entirely elided in this listing. */
69 cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
/* dev_close op: release every configured queue pair, then tear down the
 * ROC CPT device state. NOTE(review): elided listing — declarations of
 * i/ret and the return are not visible. */
75 cnxk_cpt_dev_close(struct rte_cryptodev *dev)
77 struct cnxk_cpt_vf *vf = dev->data->dev_private;
81 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
82 ret = cnxk_cpt_queue_pair_release(dev, i);
/* On failure: log is visible; whether the loop continues or aborts is
 * elided — TODO confirm against the full source. */
84 plt_err("Could not release queue pair %u", i);
/* Final hardware/firmware teardown for this VF. */
89 roc_cpt_dev_clear(&vf->cpt);
/* dev_infos_get op: fill in the device's capabilities and limits. */
95 cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
96 struct rte_cryptodev_info *info)
98 struct cnxk_cpt_vf *vf = dev->data->dev_private;
99 struct roc_cpt *roc_cpt = &vf->cpt;
/* One queue pair per available hardware LF. */
101 info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
102 info->feature_flags = dev->feature_flags;
103 info->capabilities = cnxk_crypto_capabilities_get(vf);
/* 0 here presumably means "no session-count limit" — TODO confirm
 * against the rte_cryptodev convention for this field. */
104 info->sym.max_nb_sessions = 0;
105 info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
106 info->min_mbuf_tailroom_req = 0;
/* Build the canonical memzone name for a queue pair's pending-request
 * storage ("cnxk_cpt_pq_mem_<dev>:<qp>"); used by both reserve (create)
 * and lookup (destroy) paths so the two always agree. */
110 qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
112 snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
/* Create the per-queue-pair mempool that supplies metadata buffers for
 * in-flight crypto ops, and record it (with the element length) in
 * qp->meta_info. NOTE(review): elided listing — local declarations of
 * mlen/cache_sz, the non-symmetric branch, and the error/return paths
 * are not visible. */
116 cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
117 struct cnxk_cpt_qp *qp, uint8_t qp_id,
118 uint32_t nb_elements)
120 char mempool_name[RTE_MEMPOOL_NAMESIZE];
121 struct cpt_qp_meta_info *meta_info;
122 struct rte_mempool *pool;
/* Element size depends on whether symmetric crypto is enabled. */
126 if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
128 mlen = cnxk_cpt_get_mlen();
/* Per-lcore cache size, capped at the mempool maximum.
 * NOTE(review): `nb_elements / 1.5` is a float division truncated back
 * to integer on assignment — presumably intentional shorthand for
 * "two-thirds of the elements"; flagging for awareness only. */
131 cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
133 /* Allocate mempool */
/* Name is unique per (device, queue pair). */
135 snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
136 dev->data->dev_id, qp_id);
/* No object constructors/destructors needed — buffers are raw scratch
 * space filled per operation. */
138 pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
139 NULL, NULL, NULL, NULL, rte_socket_id(), 0);
142 plt_err("Could not create mempool for metabuf");
146 meta_info = &qp->meta_info;
148 meta_info->pool = pool;
149 meta_info->mlen = mlen;
/* Free the queue pair's metabuf mempool and clear the stale pointer so
 * a later destroy/release cannot double-free it. */
155 cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
157 struct cpt_qp_meta_info *meta_info = &qp->meta_info;
159 rte_mempool_free(meta_info->pool);
161 meta_info->pool = NULL;
/* Allocate and initialize one queue pair: the qp struct itself, a
 * memzone-backed pending-request ring, and the metabuf mempool.
 * Returns the new qp, or (presumably) NULL on failure — the error
 * epilogue is mostly elided from this listing. */
165 static struct cnxk_cpt_qp *
166 cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
169 const struct rte_memzone *pq_mem;
170 char name[RTE_MEMZONE_NAMESIZE];
171 struct cnxk_cpt_qp *qp;
176 /* Allocate queue pair */
177 qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
180 plt_err("Could not allocate queue pair");
184 /* For pending queue */
/* One inflight-request slot per instruction-queue descriptor. */
185 len = iq_len * sizeof(struct cpt_inflight_req);
/* Memzone name must match the one used in cnxk_cpt_qp_destroy()'s
 * lookup. */
187 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
190 pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
191 RTE_MEMZONE_SIZE_HINT_ONLY |
193 RTE_CACHE_LINE_SIZE);
194 if (pq_mem == NULL) {
195 plt_err("Could not allocate reserved memzone");
203 ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
205 plt_err("Could not create mempool for metabuf");
209 /* Initialize pending queue */
/* Ring starts empty: head == tail, nothing pending. */
210 qp->pend_q.req_queue = pq_mem->addr;
211 qp->pend_q.enq_tail = 0;
212 qp->pend_q.deq_head = 0;
213 qp->pend_q.pending_count = 0;
/* Error-unwind path: release the pending-queue memzone (earlier labels
 * and the qp free are elided from this listing). */
218 rte_memzone_free(pq_mem);
/* Tear down a queue pair created by cnxk_cpt_qp_create(): destroy the
 * metabuf mempool, then free the pending-queue memzone found by name.
 * NOTE(review): the rte_memzone_free() error check and the qp free /
 * return are elided from this listing. */
225 cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
227 const struct rte_memzone *pq_mem;
228 char name[RTE_MEMZONE_NAMESIZE];
231 cnxk_cpt_metabuf_mempool_destroy(qp);
/* Recompute the same name used at reserve time to find the zone. */
233 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
236 pq_mem = rte_memzone_lookup(name);
238 ret = rte_memzone_free(pq_mem);
/* queue_pair_release op: finalize the hardware LF and destroy the
 * software queue pair, then clear both bookkeeping slots so the id can
 * be reused. NOTE(review): the LF finalization call and early-exit
 * checks between the visible lines are elided from this listing. */
248 cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
250 struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
251 struct cnxk_cpt_vf *vf = dev->data->dev_private;
252 struct roc_cpt *roc_cpt = &vf->cpt;
253 struct roc_cpt_lf *lf;
259 lf = roc_cpt->lf[qp_id];
265 ret = cnxk_cpt_qp_destroy(dev, qp);
267 plt_err("Could not destroy queue pair %d", qp_id);
/* Drop both references so a subsequent setup starts clean. */
271 roc_cpt->lf[qp_id] = NULL;
272 dev->data->queue_pairs[qp_id] = NULL;
/* queue_pair_setup op: (re)create a queue pair, bind it to a hardware
 * LF, set up the LMT line, and publish it in dev->data. socket_id is
 * ignored — allocation uses rte_socket_id() inside qp_create.
 * NOTE(review): elided listing — several error branches/returns are
 * not visible. */
278 cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
279 const struct rte_cryptodev_qp_conf *conf,
280 int socket_id __rte_unused)
282 struct cnxk_cpt_vf *vf = dev->data->dev_private;
283 struct roc_cpt *roc_cpt = &vf->cpt;
284 struct rte_pci_device *pci_dev;
285 struct cnxk_cpt_qp *qp;
/* Re-setup on an existing id: release the old pair first. */
288 if (dev->data->queue_pairs[qp_id] != NULL)
289 cnxk_cpt_queue_pair_release(dev, qp_id);
291 pci_dev = RTE_DEV_TO_PCI(dev->device);
/* BAR2 must be mapped — presumably where the LF registers live;
 * TODO confirm against the device's BAR layout. */
293 if (pci_dev->mem_resource[2].addr == NULL) {
294 plt_err("Invalid PCI mem address");
298 qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
300 plt_err("Could not create queue pair %d", qp_id);
/* Configure and init the hardware LF backing this queue pair. */
304 qp->lf.lf_id = qp_id;
305 qp->lf.nb_desc = conf->nb_descriptors;
307 ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
309 plt_err("Could not initialize queue pair %d", qp_id);
314 roc_cpt->lf[qp_id] = &qp->lf;
316 ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
/* Undo the LF registration before bailing out. */
318 roc_cpt->lf[qp_id] = NULL;
319 plt_err("Could not init lmtline for queue pair %d", qp_id);
/* Success: record the session mempools and publish the qp. */
323 qp->sess_mp = conf->mp_session;
324 qp->sess_mp_priv = conf->mp_session_private;
325 dev->data->queue_pairs[qp_id] = qp;
/* Error-unwind path (label elided): destroy the half-built qp. */
330 cnxk_cpt_qp_destroy(dev, qp);
/* sym_session_get_size op: size of the PMD's private symmetric-session
 * data; also used by sym_session_clear() to zero the private area. */
335 cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
337 return sizeof(struct cnxk_se_sess);
/* Classify a symmetric transform (chain) into a CNXK_CPT_* kind, or
 * reject unsupported combinations. Order of checks matters: single
 * transforms first, then specific rejected chains, then the generic
 * two-element chains. NOTE(review): elided listing — the return values
 * of the rejection branches and the final fallthrough return are not
 * visible here. */
341 sym_xform_verify(struct rte_crypto_sym_xform *xform)
/* NULL-auth verify alone is rejected (branch body elided). */
343 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
344 xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
345 xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
/* Single-transform cases. */
348 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
349 return CNXK_CPT_CIPHER;
351 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
352 return CNXK_CPT_AUTH;
354 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
355 return CNXK_CPT_AEAD;
/* Any other single transform is invalid (result elided). */
357 if (xform->next == NULL)
/* 3DES-CBC + SHA1 chains rejected in both orders (results elided —
 * presumably unsupported by hardware; TODO confirm). */
360 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
361 xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
362 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
363 xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
366 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
367 xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
368 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
369 xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
/* Standard encrypt-then-MAC / MAC-then-decrypt chains. */
372 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
373 xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
374 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
375 xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
376 return CNXK_CPT_CIPHER_ENC_AUTH_GEN;
378 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
379 xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
380 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
381 xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
382 return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;
/* Auth-generate then encrypt: only SHA1-HMAC + AES-CBC is accepted
 * (other algo combinations fall to elided default handling). */
384 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
385 xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
386 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
387 xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
388 switch (xform->auth.algo) {
389 case RTE_CRYPTO_AUTH_SHA1_HMAC:
390 switch (xform->next->cipher.algo) {
391 case RTE_CRYPTO_CIPHER_AES_CBC:
392 return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
/* Decrypt then auth-verify: mirror of the above, AES-CBC + SHA1-HMAC
 * only. */
401 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
402 xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
403 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
404 xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
405 switch (xform->cipher.algo) {
406 case RTE_CRYPTO_CIPHER_AES_CBC:
407 switch (xform->next->auth.algo) {
408 case RTE_CRYPTO_AUTH_SHA1_HMAC:
409 return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
/* Precompute the CPT instruction word 7 for a session: the context
 * pointer plus the engine group to dispatch to. NOTE(review): the
 * return of inst_w7 (presumably as u64) is elided from this listing. */
422 cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
424 union cpt_inst_w7 inst_w7;
426 inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
428 /* Set the engine group */
/* ZUC/SNOW (zsk) and ChaCha-Poly ops go to the SE group; everything
 * else to the IE group. */
429 if (sess->zsk_flag || sess->chacha_poly)
430 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
432 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
/* Populate a symmetric session: verify the transform chain, take a
 * private-data object from `pool`, fill it per the chain kind, then
 * attach it to `sess` under `driver_id`. On any failure after the
 * mempool get, the object is returned to the pool (label elided).
 * NOTE(review): elided listing — several case labels, breaks and
 * returns are not visible. */
438 sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
439 struct rte_crypto_sym_xform *xform,
440 struct rte_cryptodev_sym_session *sess,
441 struct rte_mempool *pool)
443 struct cnxk_se_sess *sess_priv;
/* Classify the chain; negative means unsupported. */
447 ret = sym_xform_verify(xform);
448 if (unlikely(ret < 0))
451 if (unlikely(rte_mempool_get(pool, &priv))) {
452 plt_dp_err("Could not allocate session private data");
/* Mempool objects are recycled — start from a clean slate. */
456 memset(priv, 0, sizeof(struct cnxk_se_sess));
/* Dispatch on the chain kind returned by sym_xform_verify(). */
461 case CNXK_CPT_CIPHER:
462 ret = fill_sess_cipher(xform, sess_priv);
/* AES-GMAC is modelled as an auth algo but configured via the GMAC
 * fill helper. */
465 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
466 ret = fill_sess_gmac(xform, sess_priv);
468 ret = fill_sess_auth(xform, sess_priv);
471 ret = fill_sess_aead(xform, sess_priv);
/* Cipher-first chains: cipher from xform, auth from xform->next. */
473 case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
474 case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
475 ret = fill_sess_cipher(xform, sess_priv);
478 ret = fill_sess_auth(xform->next, sess_priv);
/* Auth-first chains: auth from xform, cipher from xform->next. */
480 case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
481 case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
482 ret = fill_sess_auth(xform, sess_priv);
485 ret = fill_sess_cipher(xform->next, sess_priv);
/* HMAC sessions: reject MAC lengths the hardware cannot produce. */
494 if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
495 cpt_mac_len_verify(&xform->auth)) {
496 plt_dp_err("MAC length is not supported");
/* Cache instruction word 7 so the datapath need not rebuild it. */
501 sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);
503 set_sym_session_private_data(sess, driver_id, sess_priv);
/* Error-unwind path (label elided): give the object back. */
508 rte_mempool_put(pool, priv);
/* sym_session_configure op: thin wrapper that extracts the VF's ROC CPT
 * handle and the driver id, then delegates to sym_session_configure(). */
514 cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
515 struct rte_crypto_sym_xform *xform,
516 struct rte_cryptodev_sym_session *sess,
517 struct rte_mempool *pool)
519 struct cnxk_cpt_vf *vf = dev->data->dev_private;
520 struct roc_cpt *roc_cpt = &vf->cpt;
523 driver_id = dev->driver_id;
525 return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
/* Wipe and release a session's private data: zero it (it may hold key
 * material), detach it from the session, and return the object to its
 * originating mempool. NOTE(review): the NULL-priv early exit between
 * the visible lines is elided from this listing. */
529 sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
531 void *priv = get_sym_session_private_data(sess, driver_id);
532 struct rte_mempool *pool;
/* Zero before reuse; size comes from the get_size op. */
537 memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
/* Recover the owning mempool from the object pointer itself. */
539 pool = rte_mempool_from_obj(priv);
541 set_sym_session_private_data(sess, driver_id, NULL);
543 rte_mempool_put(pool, priv);
/* sym_session_clear op: delegate to sym_session_clear() with this
 * device's driver id. */
547 cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
548 struct rte_cryptodev_sym_session *sess)
550 return sym_session_clear(dev->driver_id, sess);