1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
6 #include <rte_bus_pci.h>
7 #include <rte_cryptodev.h>
8 #include <rte_cryptodev_pmd.h>
9 #include <rte_malloc.h>
11 #include "cpt_pmd_logs.h"
12 #include "cpt_pmd_ops_helper.h"
13 #include "cpt_ucode.h"
14 #include "cpt_request_mgr.h"
16 #include "otx_cryptodev.h"
17 #include "otx_cryptodev_capabilities.h"
18 #include "otx_cryptodev_hw_access.h"
19 #include "otx_cryptodev_ops.h"
/* Count of probed CPT crypto devices; guarded by otx_probe_count_lock. */
21 static int otx_cryptodev_probe_count;
22 static rte_spinlock_t otx_probe_count_lock = RTE_SPINLOCK_INITIALIZER;
/* Meta-buffer pool and meta lengths shared by all CPT VFs; created once in
 * init_global_resources() and handed to each VF via otx_cpt_common_vars_init().
 */
24 static struct rte_mempool *otx_cpt_meta_pool;
25 static int otx_cpt_op_mlen;
26 static int otx_cpt_op_sb_mlen;
28 /* Forward declarations */
/* Needed early: que_pair_setup and dev_close call it before its definition. */
31 otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
34 * Initializes global variables used by fast-path code
37 * - 0 on success, errcode on error
40 init_global_resources(void)
42 	/* Get meta len for scatter gather mode */
43 	otx_cpt_op_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
45 	/* Extra 4B saved for future considerations */
46 	otx_cpt_op_mlen += 4 * sizeof(uint64_t);
/* One pool shared by every CPT VF: 4096*16 elements of the SG-mode meta
 * length, cache size 512. NOTE(review): the flags/socket arguments fall on
 * an elided line — confirm against the full file.
 */
48 	otx_cpt_meta_pool = rte_mempool_create("cpt_metabuf-pool", 4096 * 16,
49 					otx_cpt_op_mlen, 512, 0,
50 					NULL, NULL, NULL, NULL,
/* Pool creation failure is fatal for the probe; error path is elided here. */
52 	if (!otx_cpt_meta_pool) {
53 		CPT_LOG_ERR("cpt metabuf pool not created");
57 	/* Get meta len for direct mode */
58 	otx_cpt_op_sb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
60 	/* Extra 4B saved for future considerations */
61 	otx_cpt_op_sb_mlen += 4 * sizeof(uint64_t);
/* Tears down globals created by init_global_resources() once the last
 * probed device goes away; probe count and pool access are serialized
 * by otx_probe_count_lock.
 */
67 cleanup_global_resources(void)
70 	rte_spinlock_lock(&otx_probe_count_lock);
72 	/* Decrement the cryptodev count */
73 	otx_cryptodev_probe_count--;
/* Free the shared meta pool only when no device still references it. */
76 	if (otx_cpt_meta_pool && otx_cryptodev_probe_count == 0)
77 		rte_mempool_free(otx_cpt_meta_pool);
80 	rte_spinlock_unlock(&otx_probe_count_lock);
/* EAL alarm callback: polls the VF for misc (e.g. mailbox) events and
 * re-arms itself, giving a periodic poll every CPT_INTR_POLL_INTERVAL_MS.
 */
86 otx_cpt_alarm_cb(void *arg)
88 	struct cpt_vf *cptvf = arg;
89 	otx_cpt_poll_misc(cptvf);
/* Re-arm; interval is expressed in microseconds for rte_eal_alarm_set(). */
90 	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
91 			  otx_cpt_alarm_cb, cptvf);
/* Starts the periodic poll chain for a VF (arg is the struct cpt_vf *).
 * Returns the rte_eal_alarm_set() result (0 on success).
 */
95 otx_cpt_periodic_alarm_start(void *arg)
97 	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
98 				 otx_cpt_alarm_cb, arg);
/* Cancels the periodic poll alarm for this VF; returns the number of
 * canceled alarms (or negative on error) from rte_eal_alarm_cancel().
 */
102 otx_cpt_periodic_alarm_stop(void *arg)
104 	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
/* cryptodev dev_configure hook: nothing to configure for this PMD beyond
 * the trace; return statement falls on an elided line.
 */
110 otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
111 		   struct rte_cryptodev_config *config __rte_unused)
113 	CPT_PMD_INIT_FUNC_TRACE();
/* dev_start hook: delegates to the HW-access layer to start the VF. */
118 otx_cpt_dev_start(struct rte_cryptodev *c_dev)
120 	void *cptvf = c_dev->data->dev_private;
122 	CPT_PMD_INIT_FUNC_TRACE();
124 	return otx_cpt_start_device(cptvf);
/* dev_stop hook: stops the underlying VF; void return per the ops API. */
128 otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
130 	void *cptvf = c_dev->data->dev_private;
132 	CPT_PMD_INIT_FUNC_TRACE();
134 	otx_cpt_stop_device(cptvf);
/* dev_close hook: releases all queue pairs, stops the poll alarm, and
 * de-initializes the VF. Error handling for the release loop falls on
 * elided lines.
 */
138 otx_cpt_dev_close(struct rte_cryptodev *c_dev)
140 	void *cptvf = c_dev->data->dev_private;
143 	CPT_PMD_INIT_FUNC_TRACE();
/* Free every configured queue pair before tearing the device down. */
145 	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
146 		ret = otx_cpt_que_pair_release(c_dev, i);
151 	otx_cpt_periodic_alarm_stop(cptvf);
152 	otx_cpt_deinit_device(cptvf);
/* dev_infos_get hook: fills capabilities and limits for this PMD.
 * max_nb_sessions == 0 means "no session-count limit" per the cryptodev API.
 */
158 otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
160 	CPT_PMD_INIT_FUNC_TRACE();
162 	info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
163 	info->feature_flags = dev->feature_flags;
164 	info->capabilities = otx_get_capabilities();
165 	info->sym.max_nb_sessions = 0;
166 	info->driver_id = otx_cryptodev_driver_id;
167 	info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
168 	info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
/* stats_get hook: not implemented — only emits the init trace. */
173 otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
174 		  struct rte_cryptodev_stats *stats __rte_unused)
176 	CPT_PMD_INIT_FUNC_TRACE();
/* stats_reset hook: not implemented — only emits the init trace. */
180 otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
182 	CPT_PMD_INIT_FUNC_TRACE();
/* queue_pair_setup hook: binds CPT instance 0 of the VF to the requested
 * queue pair slot. An already-configured slot is released first; descriptor
 * counts above DEFAULT_CMD_QLEN are clamped to the default (log only —
 * clamp itself is on an elided line).
 */
186 otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
187 		       uint16_t que_pair_id,
188 		       const struct rte_cryptodev_qp_conf *qp_conf,
189 		       int socket_id __rte_unused,
190 		       struct rte_mempool *session_pool __rte_unused)
192 	void *cptvf = dev->data->dev_private;
193 	struct cpt_instance *instance = NULL;
194 	struct rte_pci_device *pci_dev;
197 	CPT_PMD_INIT_FUNC_TRACE();
/* Re-setup: release the previous instance before acquiring a new one. */
199 	if (dev->data->queue_pairs[que_pair_id] != NULL) {
200 		ret = otx_cpt_que_pair_release(dev, que_pair_id);
205 	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
206 		CPT_LOG_INFO("Number of descriptors too big %d, using default "
207 			     "queue length of %d", qp_conf->nb_descriptors,
/* Sanity check: BAR0 must be mapped before touching the device. */
211 	pci_dev = RTE_DEV_TO_PCI(dev->device);
213 	if (pci_dev->mem_resource[0].addr == NULL) {
214 		CPT_LOG_ERR("PCI mem address null");
/* Group 0 is the only group used; a VF exposes one usable instance. */
218 	ret = otx_cpt_get_resource(cptvf, 0, &instance);
219 	if (ret != 0 || instance == NULL) {
220 		CPT_LOG_ERR("Error getting instance handle from device %s : "
221 			    "ret = %d", dev->data->name, ret);
225 	instance->queue_id = que_pair_id;
226 	dev->data->queue_pairs[que_pair_id] = instance;
/* queue_pair_release hook: returns the slot's cpt_instance to the VF
 * resource pool and clears the slot pointer. Error return path between
 * the log and the clear is elided.
 */
232 otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
234 	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
237 	CPT_PMD_INIT_FUNC_TRACE();
239 	ret = otx_cpt_put_resource(instance);
241 		CPT_LOG_ERR("Error putting instance handle of device %s : "
242 			    "ret = %d", dev->data->name, ret);
246 	dev->data->queue_pairs[que_pair_id] = NULL;
/* sym_session_get_size hook: size of the PMD-private session area,
 * delegated to the common CPT code.
 */
252 otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
254 	return cpt_get_session_size();
/* Computes and caches the IOVA of the session context that immediately
 * follows the cpt_sess_misc header inside the mempool object, so the
 * datapath can hand it to hardware without recomputing.
 */
258 otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
260 	struct rte_cryptodev_sym_session *sess = sym_sess;
261 	struct cpt_sess_misc *cpt_sess =
262 	    (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id);
264 	CPT_PMD_INIT_FUNC_TRACE();
/* Context lives right after the misc header in the same mempool object. */
265 	cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
266 			sizeof(struct cpt_sess_misc);
/* sym_session_configure hook: validates the xform chain, allocates the
 * private session object from 'mempool', fills it per xform type, then
 * attaches it to 'sess' and finalizes via otx_cpt_session_init().
 * NOTE(review): break/goto statements of the switch and the chain walk
 * fall on elided lines — structure assumed from the visible calls.
 */
270 otx_cpt_session_cfg(struct rte_cryptodev *dev,
271 		    struct rte_crypto_sym_xform *xform,
272 		    struct rte_cryptodev_sym_session *sess,
273 		    struct rte_mempool *mempool)
275 	struct rte_crypto_sym_xform *chain;
276 	void *sess_private_data = NULL;
278 	CPT_PMD_INIT_FUNC_TRACE();
/* Reject algo combinations the CPT microcode cannot handle. */
280 	if (cpt_is_algo_supported(xform))
283 	if (unlikely(sess == NULL)) {
284 		CPT_LOG_ERR("invalid session struct");
288 	if (rte_mempool_get(mempool, &sess_private_data)) {
289 		CPT_LOG_ERR("Could not allocate sess_private_data");
/* Dispatch on the first xform in the chain. */
295 	switch (chain->type) {
296 	case RTE_CRYPTO_SYM_XFORM_AEAD:
297 		if (fill_sess_aead(chain, sess_private_data))
300 	case RTE_CRYPTO_SYM_XFORM_CIPHER:
301 		if (fill_sess_cipher(chain, sess_private_data))
/* AES-GMAC arrives as an AUTH xform but needs the GMAC-specific fill. */
304 	case RTE_CRYPTO_SYM_XFORM_AUTH:
305 		if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
306 			if (fill_sess_gmac(chain, sess_private_data))
309 			if (fill_sess_auth(chain, sess_private_data))
314 		CPT_LOG_ERR("Invalid crypto xform type");
/* Attach the filled private data and cache its context IOVA. */
319 	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
320 	otx_cpt_session_init(sess, dev->driver_id);
/* Error path: return the unused object to its pool. */
324 	if (sess_private_data)
325 		rte_mempool_put(mempool, sess_private_data);
/* sym_session_clear hook: wipes the private session data and returns it
 * to its originating mempool (recovered via rte_mempool_from_obj), then
 * detaches it from the session. NULL-check of sess_priv is elided here.
 */
330 otx_cpt_session_clear(struct rte_cryptodev *dev,
331 		  struct rte_cryptodev_sym_session *sess)
333 	void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);
335 	CPT_PMD_INIT_FUNC_TRACE();
/* Zeroize before returning to the pool so key material does not linger. */
337 	memset(sess_priv, 0, otx_cpt_get_session_size(dev));
338 	struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
339 	set_sym_session_private_data(sess, dev->driver_id, NULL);
340 	rte_mempool_put(sess_mp, sess_priv);
/* Fast-path enqueue burst: submits up to nb_ops crypto ops, bounded by the
 * free slots in the VF's pending queue, then rings the doorbell once for
 * the whole batch. NOTE(review): 'count' appears to be reset to 0 on an
 * elided line before the loop (it first holds the free-slot count) —
 * confirm against the full file.
 */
345 otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
347 	struct cpt_instance *instance = (struct cpt_instance *)qptr;
350 	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
351 	struct pending_queue *pqueue = &cptvf->pqueue;
/* Available room in the pending queue limits this burst. */
353 	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
358 	while (likely(count < nb_ops)) {
359 		ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
360 					otx_cryptodev_driver_id);
/* Single doorbell for the whole batch of submitted ops. */
365 	otx_cpt_ring_dbell(instance, count);
/* Fast-path dequeue burst: pops completed requests from the pending queue,
 * translates per-request completion codes into rte_crypto_op statuses and
 * frees each request's meta buffer. Each rsp entry is a pointer array:
 * rsp[0] = metabuf, rsp[1] = the crypto op, rsp[2] used for auth-verify
 * data (declarations of rsp/status/metabuf fall on elided lines).
 */
370 otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
372 	struct cpt_instance *instance = (struct cpt_instance *)qptr;
373 	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
374 	struct pending_queue *pqueue = &cptvf->pqueue;
375 	uint16_t nb_completed, i = 0;
376 	uint8_t compcode[nb_ops];
378 	nb_completed = cpt_dequeue_burst(instance, nb_ops,
379 					 (void **)ops, compcode, pqueue);
380 	while (likely(i < nb_completed)) {
381 		struct rte_crypto_op *cop;
386 		rsp = (void *)ops[i];
387 		status = compcode[i];
/* Prefetch the next response entry while handling this one. */
388 		if (likely((i + 1) < nb_completed))
389 			rte_prefetch0(ops[i+1]);
390 		metabuf = (void *)rsp[0];
391 		cop = (void *)rsp[1];
/* compcode 0 = success; ICV miscompare maps to AUTH_FAILED, the rest
 * to generic ERROR.
 */
395 		if (likely(status == 0)) {
398 					RTE_CRYPTO_OP_STATUS_SUCCESS;
400 				compl_auth_verify(cop, (uint8_t *)rsp[2],
402 		} else if (status == ERR_GC_ICV_MISCOMPARE) {
403 			/*auth data mismatch */
404 			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
406 			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* Meta buffer goes back to the VF's meta pool regardless of status. */
408 		free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
/* cryptodev ops table registered in otx_cpt_dev_create(); queue_pair_count
 * is intentionally NULL (framework default is used).
 */
414 static struct rte_cryptodev_ops cptvf_ops = {
415 	/* Device related operations */
416 	.dev_configure = otx_cpt_dev_config,
417 	.dev_start = otx_cpt_dev_start,
418 	.dev_stop = otx_cpt_dev_stop,
419 	.dev_close = otx_cpt_dev_close,
420 	.dev_infos_get = otx_cpt_dev_info_get,
422 	.stats_get = otx_cpt_stats_get,
423 	.stats_reset = otx_cpt_stats_reset,
424 	.queue_pair_setup = otx_cpt_que_pair_setup,
425 	.queue_pair_release = otx_cpt_que_pair_release,
426 	.queue_pair_count = NULL,
428 	/* Crypto related operations */
429 	.sym_session_get_size = otx_cpt_get_session_size,
430 	.sym_session_configure = otx_cpt_session_cfg,
431 	.sym_session_clear = otx_cpt_session_clear
/* Copies the shared meta-pool handle and meta lengths (set up by
 * init_global_resources()) into the VF's per-device meta_info, for use
 * by the enqueue/dequeue fast path.
 */
435 otx_cpt_common_vars_init(struct cpt_vf *cptvf)
437 	cptvf->meta_info.cptvf_meta_pool = otx_cpt_meta_pool;
438 	cptvf->meta_info.cptvf_op_mlen = otx_cpt_op_mlen;
439 	cptvf->meta_info.cptvf_op_sb_mlen = otx_cpt_op_sb_mlen;
/* Probe-time constructor: allocates the VF private data, maps BAR0,
 * initializes the hardware, starts the interrupt-poll alarm, sets up the
 * global resources on first probe, and wires in the ops table, burst
 * functions and feature flags. Definition continues past the visible
 * portion of this file (error unwind/free of private data).
 */
443 otx_cpt_dev_create(struct rte_cryptodev *c_dev)
445 	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
446 	struct cpt_vf *cptvf = NULL;
/* A zero BAR0 physical address means the device is unusable. */
451 	if (pdev->mem_resource[0].phys_addr == 0ULL)
454 	/* for secondary processes, we don't initialise any further as primary
455 	 * has already done this work.
457 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
460 	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
461 			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
465 		CPT_LOG_ERR("Cannot allocate memory for device private data");
/* Build a "bus:devid.function" name for logging and HW init. */
469 	snprintf(dev_name, 32, "%02x:%02x.%x",
470 		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
472 	reg_base = pdev->mem_resource[0].addr;
474 		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
479 	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
481 		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
486 	/* Start off timer for mailbox interrupts */
487 	otx_cpt_periodic_alarm_start(cptvf);
/* First probe creates the globals; count and init are serialized by
 * otx_probe_count_lock.
 */
489 	rte_spinlock_lock(&otx_probe_count_lock);
490 	if (!otx_cryptodev_probe_count) {
491 		ret = init_global_resources();
493 			rte_spinlock_unlock(&otx_probe_count_lock);
497 	otx_cryptodev_probe_count++;
498 	rte_spinlock_unlock(&otx_probe_count_lock);
500 	/* Initialize data path variables used by common code */
501 	otx_cpt_common_vars_init(cptvf);
503 	c_dev->dev_ops = &cptvf_ops;
505 	c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
506 	c_dev->dequeue_burst = otx_cpt_pkt_dequeue;
508 	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
509 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
510 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
511 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
512 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
513 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
515 	/* Save dev private data */
516 	c_dev->data->dev_private = cptvf;
/* Error unwind: stop the alarm and de-init HW before freeing. */
521 	otx_cpt_periodic_alarm_stop(cptvf);
522 	otx_cpt_deinit_device(cptvf);
526 	/* Free private data allocated */