/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include <assert.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_hw_types.h"
23 #define METABUF_POOL_CACHE_SIZE 512
27 * Access its own BAR0/4 registers by passing VF number as 0.
28 * OS/PCI maps them accordingly.
32 otx_cpt_vf_init(struct cpt_vf *cptvf)
36 /* Check ready with PF */
37 /* Gets chip ID / device Id from PF if ready */
38 ret = otx_cpt_check_pf_ready(cptvf);
40 CPT_LOG_ERR("%s: PF not responding to READY msg",
46 CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
53 * Read Interrupt status of the VF
55 * @param cptvf cptvf structure
58 otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf)
60 return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0));
64 * Clear mailbox interrupt of the VF
66 * @param cptvf cptvf structure
69 otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf)
71 cptx_vqx_misc_int_t vqx_misc_int;
73 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
74 CPTX_VQX_MISC_INT(0, 0));
76 vqx_misc_int.s.mbox = 1;
77 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
78 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
82 * Clear instruction NCB read error interrupt of the VF
84 * @param cptvf cptvf structure
87 otx_cpt_clear_irde_intr(struct cpt_vf *cptvf)
89 cptx_vqx_misc_int_t vqx_misc_int;
91 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
92 CPTX_VQX_MISC_INT(0, 0));
94 vqx_misc_int.s.irde = 1;
95 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
96 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
100 * Clear NCB result write response error interrupt of the VF
102 * @param cptvf cptvf structure
105 otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf)
107 cptx_vqx_misc_int_t vqx_misc_int;
109 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
110 CPTX_VQX_MISC_INT(0, 0));
112 vqx_misc_int.s.nwrp = 1;
113 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
114 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
118 * Clear swerr interrupt of the VF
120 * @param cptvf cptvf structure
123 otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf)
125 cptx_vqx_misc_int_t vqx_misc_int;
127 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
128 CPTX_VQX_MISC_INT(0, 0));
130 vqx_misc_int.s.swerr = 1;
131 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
132 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
136 * Clear hwerr interrupt of the VF
138 * @param cptvf cptvf structure
141 otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf)
143 cptx_vqx_misc_int_t vqx_misc_int;
145 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
146 CPTX_VQX_MISC_INT(0, 0));
148 vqx_misc_int.s.hwerr = 1;
149 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
150 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
154 * Clear translation fault interrupt of the VF
156 * @param cptvf cptvf structure
159 otx_cpt_clear_fault_intr(struct cpt_vf *cptvf)
161 cptx_vqx_misc_int_t vqx_misc_int;
163 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
164 CPTX_VQX_MISC_INT(0, 0));
166 vqx_misc_int.s.fault = 1;
167 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
168 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
172 * Clear doorbell overflow interrupt of the VF
174 * @param cptvf cptvf structure
177 otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf)
179 cptx_vqx_misc_int_t vqx_misc_int;
181 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
182 CPTX_VQX_MISC_INT(0, 0));
184 vqx_misc_int.s.dovf = 1;
185 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
186 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
189 /* Write to VQX_CTL register
192 otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val)
194 cptx_vqx_ctl_t vqx_ctl;
196 vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
199 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
200 CPTX_VQX_CTL(0, 0), vqx_ctl.u);
203 /* Write to VQX_INPROG register
206 otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val)
208 cptx_vqx_inprog_t vqx_inprg;
210 vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
211 CPTX_VQX_INPROG(0, 0));
212 vqx_inprg.s.inflight = val;
213 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
214 CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
217 /* Write to VQX_DONE_WAIT NUMWAIT register
220 otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val)
222 cptx_vqx_done_wait_t vqx_dwait;
224 vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
225 CPTX_VQX_DONE_WAIT(0, 0));
226 vqx_dwait.s.num_wait = val;
227 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
228 CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
231 /* Write to VQX_DONE_WAIT NUM_WAIT register
234 otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val)
236 cptx_vqx_done_wait_t vqx_dwait;
238 vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
239 CPTX_VQX_DONE_WAIT(0, 0));
240 vqx_dwait.s.time_wait = val;
241 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
242 CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
245 /* Write to VQX_SADDR register
248 otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val)
250 cptx_vqx_saddr_t vqx_saddr;
253 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
254 CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
258 otx_cpt_vfvq_init(struct cpt_vf *cptvf)
260 uint64_t base_addr = 0;
263 otx_cpt_write_vq_ctl(cptvf, 0);
265 /* Reset the doorbell */
266 otx_cpt_write_vq_doorbell(cptvf, 0);
268 otx_cpt_write_vq_inprog(cptvf, 0);
271 base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr);
272 otx_cpt_write_vq_saddr(cptvf, base_addr);
274 /* Configure timerhold / coalescence */
275 otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
276 otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD);
279 otx_cpt_write_vq_ctl(cptvf, 1);
283 cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
287 /* Convey VQ LEN to PF */
288 err = otx_cpt_send_vq_size_msg(cptvf);
290 CPT_LOG_ERR("%s: PF not responding to QLEN msg",
296 /* CPT VF device initialization */
297 otx_cpt_vfvq_init(cptvf);
299 /* Send msg to PF to assign currnet Q to required group */
300 cptvf->vfgrp = group;
301 err = otx_cpt_send_vf_grp_msg(cptvf, group);
303 CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
309 CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
317 otx_cpt_poll_misc(struct cpt_vf *cptvf)
321 intr = otx_cpt_read_vf_misc_intr_status(cptvf);
326 /* Check for MISC interrupt types */
327 if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
328 CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d",
329 cptvf->dev_name, (unsigned int long)intr, cptvf->vfid);
330 otx_cpt_handle_mbox_intr(cptvf);
331 otx_cpt_clear_mbox_intr(cptvf);
332 } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
333 otx_cpt_clear_irde_intr(cptvf);
334 CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt "
335 "0x%lx on CPT VF %d", cptvf->dev_name,
336 (unsigned int long)intr, cptvf->vfid);
337 } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
338 otx_cpt_clear_nwrp_intr(cptvf);
339 CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx"
340 " on CPT VF %d", cptvf->dev_name,
341 (unsigned int long)intr, cptvf->vfid);
342 } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
343 otx_cpt_clear_swerr_intr(cptvf);
344 CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF "
345 "%d", cptvf->dev_name, (unsigned int long)intr,
347 } else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) {
348 otx_cpt_clear_hwerr_intr(cptvf);
349 CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF "
350 "%d", cptvf->dev_name, (unsigned int long)intr,
352 } else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) {
353 otx_cpt_clear_fault_intr(cptvf);
354 CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF "
355 "%d", cptvf->dev_name, (unsigned int long)intr,
357 } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
358 otx_cpt_clear_dovf_intr(cptvf);
359 CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF "
360 "%d", cptvf->dev_name, (unsigned int long)intr,
363 CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx in CPT VF %d",
364 cptvf->dev_name, (unsigned int long)intr,
369 otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
371 memset(cptvf, 0, sizeof(struct cpt_vf));
373 /* Bar0 base address */
374 cptvf->reg_base = reg_base;
376 /* Save device name */
377 strlcpy(cptvf->dev_name, name, (sizeof(cptvf->dev_name)));
381 /* To clear if there are any pending mbox msgs */
382 otx_cpt_poll_misc(cptvf);
384 if (otx_cpt_vf_init(cptvf)) {
385 CPT_LOG_ERR("Failed to initialize CPT VF device");
393 otx_cpt_deinit_device(void *dev)
395 struct cpt_vf *cptvf = (struct cpt_vf *)dev;
397 /* Do misc work one last time */
398 otx_cpt_poll_misc(cptvf);
404 otx_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
405 struct cpt_instance *instance, uint8_t qp_id,
408 char mempool_name[RTE_MEMPOOL_NAMESIZE];
409 int sg_mlen, lb_mlen, max_mlen, ret;
410 struct cpt_qp_meta_info *meta_info;
411 struct rte_mempool *pool;
413 /* Get meta len for scatter gather mode */
414 sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
416 /* Extra 32B saved for future considerations */
417 sg_mlen += 4 * sizeof(uint64_t);
419 /* Get meta len for linear buffer (direct) mode */
420 lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
422 /* Extra 32B saved for future considerations */
423 lb_mlen += 4 * sizeof(uint64_t);
425 /* Check max requirement for meta buffer */
426 max_mlen = RTE_MAX(lb_mlen, sg_mlen);
428 /* Allocate mempool */
430 snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx_cpt_mb_%u:%u",
431 dev->data->dev_id, qp_id);
433 pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
434 METABUF_POOL_CACHE_SIZE, 0,
438 CPT_LOG_ERR("Could not create mempool for metabuf");
442 ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
445 CPT_LOG_ERR("Could not set mempool ops");
449 ret = rte_mempool_populate_default(pool);
451 CPT_LOG_ERR("Could not populate metabuf pool");
455 meta_info = &instance->meta_info;
457 meta_info->pool = pool;
458 meta_info->lb_mlen = lb_mlen;
459 meta_info->sg_mlen = sg_mlen;
464 rte_mempool_free(pool);
469 otx_cpt_metabuf_mempool_destroy(struct cpt_instance *instance)
471 struct cpt_qp_meta_info *meta_info = &instance->meta_info;
473 rte_mempool_free(meta_info->pool);
475 meta_info->pool = NULL;
476 meta_info->lb_mlen = 0;
477 meta_info->sg_mlen = 0;
481 otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
482 struct cpt_instance **instance, uint16_t qp_id)
484 int ret = -ENOENT, len, qlen, i;
485 int chunk_len, chunks, chunk_size;
486 struct cpt_vf *cptvf = dev->data->dev_private;
487 struct cpt_instance *cpt_instance;
488 struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
489 struct command_chunk *chunk = NULL;
491 const struct rte_memzone *rz;
492 uint64_t dma_addr = 0, alloc_len, used_len;
494 uint64_t pg_sz = sysconf(_SC_PAGESIZE);
496 CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);
498 cpt_instance = &cptvf->instance;
500 memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
501 memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));
503 /* Chunks are of fixed size buffers */
504 chunks = DEFAULT_CMD_QCHUNKS;
505 chunk_len = DEFAULT_CMD_QCHUNK_SIZE;
507 qlen = chunks * chunk_len;
508 /* Chunk size includes 8 bytes of next chunk ptr */
509 chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;
511 /* For command chunk structures */
512 len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);
514 /* For pending queue */
515 len += qlen * RTE_ALIGN(sizeof(struct rid), 8);
517 /* So that instruction queues start as pg size aligned */
518 len = RTE_ALIGN(len, pg_sz);
520 /* For Instruction queues */
521 len += chunks * RTE_ALIGN(chunk_size, 128);
523 /* Wastage after instruction queues */
524 len = RTE_ALIGN(len, pg_sz);
526 rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
527 RTE_MEMZONE_SIZE_HINT_ONLY |
529 RTE_CACHE_LINE_SIZE);
536 dma_addr = rz->phys_addr;
541 cpt_instance->rsvd = (uintptr_t)rz;
543 ret = otx_cpt_metabuf_mempool_create(dev, cpt_instance, qp_id, qlen);
545 CPT_LOG_ERR("Could not create mempool for metabuf");
549 /* Pending queue setup */
550 cptvf->pqueue.rid_queue = (struct rid *)mem;
551 cptvf->pqueue.enq_tail = 0;
552 cptvf->pqueue.deq_head = 0;
553 cptvf->pqueue.pending_count = 0;
555 mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
556 len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
557 dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);
559 /* Alignment wastage */
560 used_len = alloc_len - len;
561 mem += RTE_ALIGN(used_len, pg_sz) - used_len;
562 len -= RTE_ALIGN(used_len, pg_sz) - used_len;
563 dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;
565 /* Init instruction queues */
566 chunk_head = &cptvf->cqueue.chead[0];
570 for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
573 chunk = &cptvf->cqueue.chead[i];
575 chunk->dma_addr = dma_addr;
577 csize = RTE_ALIGN(chunk_size, 128);
583 next_ptr = (uint64_t *)(chunk_prev->head +
585 *next_ptr = (uint64_t)chunk->dma_addr;
590 next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
591 *next_ptr = (uint64_t)chunk_head->dma_addr;
595 /* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
596 cptvf->qsize = chunk_size / 8;
597 cptvf->cqueue.qhead = chunk_head->head;
598 cptvf->cqueue.idx = 0;
599 cptvf->cqueue.cchunk = 0;
601 if (cpt_vq_init(cptvf, group)) {
602 CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
605 goto mempool_destroy;
608 *instance = cpt_instance;
610 CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);
615 otx_cpt_metabuf_mempool_destroy(cpt_instance);
617 rte_memzone_free(rz);
624 otx_cpt_put_resource(struct cpt_instance *instance)
626 struct cpt_vf *cptvf = (struct cpt_vf *)instance;
627 struct rte_memzone *rz;
630 CPT_LOG_ERR("Invalid CPTVF handle");
634 CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);
636 otx_cpt_metabuf_mempool_destroy(instance);
638 rz = (struct rte_memzone *)instance->rsvd;
639 rte_memzone_free(rz);
644 otx_cpt_start_device(void *dev)
647 struct cpt_vf *cptvf = (struct cpt_vf *)dev;
649 rc = otx_cpt_send_vf_up(cptvf);
651 CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
652 cptvf->dev_name, rc);
656 if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) {
657 CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF "
658 "device %s", cptvf->vftype, cptvf->dev_name);
666 otx_cpt_stop_device(void *dev)
669 uint32_t pending, retries = 5;
670 struct cpt_vf *cptvf = (struct cpt_vf *)dev;
672 /* Wait for pending entries to complete */
673 pending = otx_cpt_read_vq_doorbell(cptvf);
675 CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
676 cptvf->dev_name, pending);
678 pending = otx_cpt_read_vq_doorbell(cptvf);
684 if (!retries && pending) {
685 CPT_LOG_ERR("%s: Timeout waiting for commands(%u)",
686 cptvf->dev_name, pending);
690 rc = otx_cpt_send_vf_down(cptvf);
692 CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
693 cptvf->dev_name, rc);