1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
11 #include "cnxk_cryptodev.h"
12 #include "cnxk_cryptodev_ops.h"
/*
 * Size in bytes of the per-operation meta buffer carved from the
 * metabuf mempool: completion/result words, MAC, offset-control + IV,
 * and the in/out scatter-gather list.
 */
cnxk_cpt_get_mlen(void)
	/* Two 64-bit words reserved for CPT result/completion data */
	len = 2 * sizeof(uint64_t);
	/* Room for the largest digest/MAC the SE engine can produce */
	len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
	/* Offset-control word plus an AES-CBC sized IV */
	len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
	/*
	 * SG list header + entries: max in/out SG count rounded up to a
	 * multiple of 4, then >> 2 since four entries pack into one SG
	 * unit of ROC_SE_SG_ENTRY_SIZE.
	 * NOTE(review): the alignment operand of the outer RTE_ALIGN_CEIL
	 * is not visible in this view — confirm against the full source.
	 */
	len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
			       (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
				2) * ROC_SE_SG_ENTRY_SIZE),
/*
 * dev_configure op: apply the application's feature mask and configure
 * the requested number of CPT LFs (one hardware LF per queue pair).
 */
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
		    struct rte_cryptodev_config *conf)
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	uint16_t nb_lf_avail, nb_lf;

	/* Drop any feature the application explicitly disabled */
	dev->feature_flags &= ~conf->ff_disable;

	nb_lf_avail = roc_cpt->nb_lf_avail;
	nb_lf = conf->nb_queue_pairs;

	/* Reject over-subscription: cannot hand out more QPs than LFs */
	if (nb_lf > nb_lf_avail)

	ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
		plt_err("Could not configure device");
/* dev_start op — body not visible in this chunk. */
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
/* dev_stop op — body not visible in this chunk. */
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
/*
 * dev_close op: release every configured queue pair, then tear down
 * the underlying CPT device state via the ROC layer.
 */
cnxk_cpt_dev_close(struct rte_cryptodev *dev)
	struct cnxk_cpt_vf *vf = dev->data->dev_private;

	/* Best-effort release of each QP; failures are logged, not fatal */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = cnxk_cpt_queue_pair_release(dev, i);
			plt_err("Could not release queue pair %u", i);

	roc_cpt_dev_clear(&vf->cpt);
/*
 * dev_info_get op: report device limits and features.
 * NOTE(review): capabilities is NULL and max_nb_sessions is 0 here —
 * presumably filled in later / per-platform; confirm against callers.
 */
cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;

	/* One queue pair per available hardware LF */
	info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
	info->feature_flags = dev->feature_flags;
	info->capabilities = NULL;
	/* 0 => no fixed session limit advertised */
	info->sym.max_nb_sessions = 0;
	info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
	info->min_mbuf_tailroom_req = 0;
/*
 * Build the canonical pending-queue memzone name for (dev_id, qp_id),
 * shared by the create (reserve) and destroy (lookup) paths.
 */
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
	snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
/*
 * Create the per-QP mempool that backs per-operation meta buffers and
 * record its handle/element size in qp->meta_info.
 */
cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
				struct cnxk_cpt_qp *qp, uint8_t qp_id,
				uint32_t nb_elements)
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	struct rte_mempool *pool;

	/* Meta length is only needed when symmetric crypto is enabled */
	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		mlen = cnxk_cpt_get_mlen();

	/*
	 * Cap the per-lcore cache at the mempool maximum; n/1.5 keeps the
	 * aggregate cache from starving the pool of elements.
	 */
	cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);

	/* Allocate mempool */

	/* Name must be unique per (device, queue pair) */
	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	/* No object init/ctor needed: buffers are raw scratch space */
	pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
				  NULL, NULL, NULL, NULL, rte_socket_id(), 0);
		plt_err("Could not create mempool for metabuf");

	meta_info = &qp->meta_info;

	meta_info->pool = pool;
	meta_info->mlen = mlen;
/*
 * Free the QP's metabuf mempool and clear the stale handle so a later
 * destroy/teardown cannot double-free it.
 */
cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
	struct cpt_qp_meta_info *meta_info = &qp->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
/*
 * Allocate a queue pair object: the QP struct itself, a memzone-backed
 * pending queue sized for iq_len in-flight requests, and the metabuf
 * mempool.  Returns the QP on success; error paths unwind what was
 * acquired (memzone freed under the elided failure label).
 */
static struct cnxk_cpt_qp *
cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_memzone *pq_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	struct cnxk_cpt_qp *qp;

	/* Allocate queue pair */
	qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
		plt_err("Could not allocate queue pair");

	/* For pending queue */
	/* One slot per possible in-flight request */
	len = iq_len * sizeof(struct cpt_inflight_req);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,

	pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
					     RTE_MEMZONE_SIZE_HINT_ONLY |
					     RTE_CACHE_LINE_SIZE);
	if (pq_mem == NULL) {
		plt_err("Could not allocate reserved memzone");

	/* Metabuf pool sized to match the pending-queue depth */
	ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
		plt_err("Could not create mempool for metabuf");

	/* Initialize pending queue */
	qp->pend_q.req_queue = pq_mem->addr;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	/* Error unwind: release the pending-queue memzone */
	rte_memzone_free(pq_mem);
/*
 * Tear down a queue pair created by cnxk_cpt_qp_create: destroy the
 * metabuf mempool, then look up and free the pending-queue memzone by
 * its canonical name.
 * NOTE(review): no NULL check on the lookup result is visible here;
 * rte_memzone_free(NULL) returns an error — confirm the elided lines
 * handle that case.
 */
cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
	const struct rte_memzone *pq_mem;
	char name[RTE_MEMZONE_NAMESIZE];

	cnxk_cpt_metabuf_mempool_destroy(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,

	pq_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(pq_mem);
/*
 * queue_pair_release op: finalize the LF, destroy the QP object, and
 * clear both the ROC LF slot and the cryptodev QP slot so neither side
 * holds a dangling pointer.
 */
cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
	struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	struct roc_cpt_lf *lf;

	lf = roc_cpt->lf[qp_id];

	ret = cnxk_cpt_qp_destroy(dev, qp);
		plt_err("Could not destroy queue pair %d", qp_id);

	/* Unhook from both bookkeeping tables */
	roc_cpt->lf[qp_id] = NULL;
	dev->data->queue_pairs[qp_id] = NULL;
/*
 * queue_pair_setup op: (re)create a queue pair, initialize its LF and
 * LMT line, and publish it in dev->data.  An existing QP at this index
 * is released first, making setup idempotent.
 */
cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  const struct rte_cryptodev_qp_conf *conf,
			  int socket_id __rte_unused)
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	struct rte_pci_device *pci_dev;
	struct cnxk_cpt_qp *qp;

	/* Re-setup: drop any QP already configured at this index */
	if (dev->data->queue_pairs[qp_id] != NULL)
		cnxk_cpt_queue_pair_release(dev, qp_id);

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	/* BAR2 must be mapped before the LF can be programmed */
	if (pci_dev->mem_resource[2].addr == NULL) {
		plt_err("Invalid PCI mem address");

	qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
		plt_err("Could not create queue pair %d", qp_id);

	qp->lf.lf_id = qp_id;
	qp->lf.nb_desc = conf->nb_descriptors;

	ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
		plt_err("Could not initialize queue pair %d", qp_id);

	roc_cpt->lf[qp_id] = &qp->lf;

	ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
		/* Undo the LF registration done just above */
		roc_cpt->lf[qp_id] = NULL;
		plt_err("Could not init lmtline for queue pair %d", qp_id);

	qp->sess_mp = conf->mp_session;
	qp->sess_mp_priv = conf->mp_session_private;
	dev->data->queue_pairs[qp_id] = qp;

	/* Error unwind: tear down the partially-built QP */
	cnxk_cpt_qp_destroy(dev, qp);
/* Size of the PMD's private symmetric-session structure. */
cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
	return sizeof(struct cnxk_se_sess);
/*
 * Classify a symmetric transform chain into a CNXK_CPT_* category, or
 * reject unsupported combinations (return value on the reject paths is
 * on elided lines).  Order matters: single-xform cases are handled
 * before chained ones, and specific unsupported pairings are screened
 * before the generic chain classification.
 */
sym_xform_verify(struct rte_crypto_sym_xform *xform)
	/* NULL-auth verify alone is rejected */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)

	/* Single-transform cases */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return CNXK_CPT_CIPHER;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return CNXK_CPT_AUTH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
		return CNXK_CPT_AEAD;

	if (xform->next == NULL)

	/* 3DES-CBC + plain SHA1 chains are screened out (either order) */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)

	/* Encrypt-then-generate chain */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
		return CNXK_CPT_CIPHER_ENC_AUTH_GEN;

	/* Verify-then-decrypt chain */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;

	/* Generate-then-encrypt: only SHA1-HMAC + AES-CBC is accepted */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		switch (xform->auth.algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			switch (xform->next->cipher.algo) {
			case RTE_CRYPTO_CIPHER_AES_CBC:
				return CNXK_CPT_AUTH_GEN_CIPHER_ENC;

	/* Decrypt-then-verify: only AES-CBC + SHA1-HMAC is accepted */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
		switch (xform->cipher.algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
			switch (xform->next->auth.algo) {
			case RTE_CRYPTO_AUTH_SHA1_HMAC:
				return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
/*
 * Precompute instruction word 7 for a session: context pointer plus
 * the engine group that must process its ops (SE for ZUC/SNOW/Kasumi
 * and ChaCha-Poly sessions, IE otherwise).
 */
cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
	union cpt_inst_w7 inst_w7;

	inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;

	/* Set the engine group */
	if (sess->zsk_flag || sess->chacha_poly)
		inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
		inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
/*
 * Populate a symmetric session: verify the transform chain, allocate
 * private data from the session mempool, fill it per the chain type,
 * precompute inst word 7, and attach it to the generic session under
 * this driver's id.  The private object is returned to the pool on the
 * (elided) error path.
 */
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
		      struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
	struct cnxk_se_sess *sess_priv;

	ret = sym_xform_verify(xform);
	if (unlikely(ret < 0))

	if (unlikely(rte_mempool_get(pool, &priv))) {
		plt_dp_err("Could not allocate session private data");

	/* Mempool objects are not zeroed; start from a clean state */
	memset(priv, 0, sizeof(struct cnxk_se_sess));

	/* Dispatch on the chain type returned by sym_xform_verify() */
	case CNXK_CPT_CIPHER:
		ret = fill_sess_cipher(xform, sess_priv);

	sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

	set_sym_session_private_data(sess, driver_id, sess_priv);

	/* Error unwind: hand the private object back to the pool */
	rte_mempool_put(pool, priv);
/*
 * sym_session_configure op: thin wrapper that extracts the ROC CPT
 * handle and driver id from the device and delegates to
 * sym_session_configure().
 */
cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *pool)
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;

	driver_id = dev->driver_id;

	return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
/*
 * Tear down a session's private data: zero it (it holds key material;
 * NOTE(review): plain memset may be optimized away — consider an
 * explicit-zero primitive if secrets must be scrubbed), detach it from
 * the generic session, and return it to its originating mempool.
 */
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
	void *priv = get_sym_session_private_data(sess, driver_id);
	struct rte_mempool *pool;

	memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));

	/* Recover the owning pool from the object itself */
	pool = rte_mempool_from_obj(priv);

	set_sym_session_private_data(sess, driver_id, NULL);

	rte_mempool_put(pool, priv);
/* sym_session_clear op: delegate using this device's driver id. */
cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
	return sym_session_clear(dev->driver_id, sess);