/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
8 #include <rte_branch_prediction.h>
9 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_memzone.h>
12 #include <rte_string_fns.h>
14 #include "otx_cryptodev_hw_access.h"
15 #include "otx_cryptodev_mbox.h"
17 #include "cpt_pmd_logs.h"
18 #include "cpt_hw_types.h"
22 * Access its own BAR0/4 registers by passing VF number as 0.
23 * OS/PCI maps them accordingly.
27 otx_cpt_vf_init(struct cpt_vf *cptvf)
31 /* Check ready with PF */
32 /* Gets chip ID / device Id from PF if ready */
33 ret = otx_cpt_check_pf_ready(cptvf);
35 CPT_LOG_ERR("%s: PF not responding to READY msg",
41 CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
/*
 * Read Interrupt status of the VF
 *
 * @param cptvf cptvf structure
 * @return raw CPTX_VQX_MISC_INT register value
 */
static uint64_t
otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0));
}
59 * Clear mailbox interrupt of the VF
61 * @param cptvf cptvf structure
64 otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf)
66 cptx_vqx_misc_int_t vqx_misc_int;
68 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
69 CPTX_VQX_MISC_INT(0, 0));
71 vqx_misc_int.s.mbox = 1;
72 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
73 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
77 * Clear instruction NCB read error interrupt of the VF
79 * @param cptvf cptvf structure
82 otx_cpt_clear_irde_intr(struct cpt_vf *cptvf)
84 cptx_vqx_misc_int_t vqx_misc_int;
86 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
87 CPTX_VQX_MISC_INT(0, 0));
89 vqx_misc_int.s.irde = 1;
90 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
91 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
95 * Clear NCB result write response error interrupt of the VF
97 * @param cptvf cptvf structure
100 otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf)
102 cptx_vqx_misc_int_t vqx_misc_int;
104 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
105 CPTX_VQX_MISC_INT(0, 0));
107 vqx_misc_int.s.nwrp = 1;
108 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
109 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
113 * Clear swerr interrupt of the VF
115 * @param cptvf cptvf structure
118 otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf)
120 cptx_vqx_misc_int_t vqx_misc_int;
122 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
123 CPTX_VQX_MISC_INT(0, 0));
125 vqx_misc_int.s.swerr = 1;
126 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
127 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
131 * Clear hwerr interrupt of the VF
133 * @param cptvf cptvf structure
136 otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf)
138 cptx_vqx_misc_int_t vqx_misc_int;
140 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
141 CPTX_VQX_MISC_INT(0, 0));
143 vqx_misc_int.s.hwerr = 1;
144 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
145 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
149 * Clear translation fault interrupt of the VF
151 * @param cptvf cptvf structure
154 otx_cpt_clear_fault_intr(struct cpt_vf *cptvf)
156 cptx_vqx_misc_int_t vqx_misc_int;
158 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
159 CPTX_VQX_MISC_INT(0, 0));
161 vqx_misc_int.s.fault = 1;
162 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
163 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
167 * Clear doorbell overflow interrupt of the VF
169 * @param cptvf cptvf structure
172 otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf)
174 cptx_vqx_misc_int_t vqx_misc_int;
176 vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
177 CPTX_VQX_MISC_INT(0, 0));
179 vqx_misc_int.s.dovf = 1;
180 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
181 CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
184 /* Write to VQX_CTL register
187 otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val)
189 cptx_vqx_ctl_t vqx_ctl;
191 vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
194 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
195 CPTX_VQX_CTL(0, 0), vqx_ctl.u);
198 /* Write to VQX_INPROG register
201 otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val)
203 cptx_vqx_inprog_t vqx_inprg;
205 vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
206 CPTX_VQX_INPROG(0, 0));
207 vqx_inprg.s.inflight = val;
208 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
209 CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
212 /* Write to VQX_DONE_WAIT NUMWAIT register
215 otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val)
217 cptx_vqx_done_wait_t vqx_dwait;
219 vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
220 CPTX_VQX_DONE_WAIT(0, 0));
221 vqx_dwait.s.num_wait = val;
222 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
223 CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
226 /* Write to VQX_DONE_WAIT NUM_WAIT register
229 otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val)
231 cptx_vqx_done_wait_t vqx_dwait;
233 vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
234 CPTX_VQX_DONE_WAIT(0, 0));
235 vqx_dwait.s.time_wait = val;
236 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
237 CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
240 /* Write to VQX_SADDR register
243 otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val)
245 cptx_vqx_saddr_t vqx_saddr;
248 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
249 CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
253 otx_cpt_vfvq_init(struct cpt_vf *cptvf)
255 uint64_t base_addr = 0;
258 otx_cpt_write_vq_ctl(cptvf, 0);
260 /* Reset the doorbell */
261 otx_cpt_write_vq_doorbell(cptvf, 0);
263 otx_cpt_write_vq_inprog(cptvf, 0);
266 base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr);
267 otx_cpt_write_vq_saddr(cptvf, base_addr);
269 /* Configure timerhold / coalescence */
270 otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
271 otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD);
274 otx_cpt_write_vq_ctl(cptvf, 1);
278 cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
282 /* Convey VQ LEN to PF */
283 err = otx_cpt_send_vq_size_msg(cptvf);
285 CPT_LOG_ERR("%s: PF not responding to QLEN msg",
291 /* CPT VF device initialization */
292 otx_cpt_vfvq_init(cptvf);
294 /* Send msg to PF to assign currnet Q to required group */
295 cptvf->vfgrp = group;
296 err = otx_cpt_send_vf_grp_msg(cptvf, group);
298 CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
304 CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
312 otx_cpt_poll_misc(struct cpt_vf *cptvf)
316 intr = otx_cpt_read_vf_misc_intr_status(cptvf);
321 /* Check for MISC interrupt types */
322 if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
323 CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d",
324 cptvf->dev_name, (unsigned int long)intr, cptvf->vfid);
325 otx_cpt_handle_mbox_intr(cptvf);
326 otx_cpt_clear_mbox_intr(cptvf);
327 } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
328 otx_cpt_clear_irde_intr(cptvf);
329 CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt "
330 "0x%lx on CPT VF %d", cptvf->dev_name,
331 (unsigned int long)intr, cptvf->vfid);
332 } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
333 otx_cpt_clear_nwrp_intr(cptvf);
334 CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx"
335 " on CPT VF %d", cptvf->dev_name,
336 (unsigned int long)intr, cptvf->vfid);
337 } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
338 otx_cpt_clear_swerr_intr(cptvf);
339 CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF "
340 "%d", cptvf->dev_name, (unsigned int long)intr,
342 } else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) {
343 otx_cpt_clear_hwerr_intr(cptvf);
344 CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF "
345 "%d", cptvf->dev_name, (unsigned int long)intr,
347 } else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) {
348 otx_cpt_clear_fault_intr(cptvf);
349 CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF "
350 "%d", cptvf->dev_name, (unsigned int long)intr,
352 } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
353 otx_cpt_clear_dovf_intr(cptvf);
354 CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF "
355 "%d", cptvf->dev_name, (unsigned int long)intr,
358 CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx in CPT VF %d",
359 cptvf->dev_name, (unsigned int long)intr,
364 otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
366 memset(cptvf, 0, sizeof(struct cpt_vf));
368 /* Bar0 base address */
369 cptvf->reg_base = reg_base;
371 /* Save device name */
372 strlcpy(cptvf->dev_name, name, (sizeof(cptvf->dev_name)));
376 /* To clear if there are any pending mbox msgs */
377 otx_cpt_poll_misc(cptvf);
379 if (otx_cpt_vf_init(cptvf)) {
380 CPT_LOG_ERR("Failed to initialize CPT VF device");
/*
 * Tear-down hook for a CPT VF device.
 *
 * @param dev cpt_vf pointer passed as void*
 * @return 0 always
 */
int
otx_cpt_deinit_device(void *dev)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	/* Do misc work one last time */
	otx_cpt_poll_misc(cptvf);

	return 0;
}
399 otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
401 int ret = -ENOENT, len, qlen, i;
402 int chunk_len, chunks, chunk_size;
403 struct cpt_vf *cptvf = (struct cpt_vf *)dev;
404 struct cpt_instance *cpt_instance;
405 struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
406 struct command_chunk *chunk = NULL;
408 const struct rte_memzone *rz;
409 uint64_t dma_addr = 0, alloc_len, used_len;
411 uint64_t pg_sz = sysconf(_SC_PAGESIZE);
413 CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);
415 cpt_instance = &cptvf->instance;
417 memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
418 memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));
420 /* Chunks are of fixed size buffers */
421 chunks = DEFAULT_CMD_QCHUNKS;
422 chunk_len = DEFAULT_CMD_QCHUNK_SIZE;
424 qlen = chunks * chunk_len;
425 /* Chunk size includes 8 bytes of next chunk ptr */
426 chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;
428 /* For command chunk structures */
429 len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);
431 /* For pending queue */
432 len += qlen * RTE_ALIGN(sizeof(struct rid), 8);
434 /* So that instruction queues start as pg size aligned */
435 len = RTE_ALIGN(len, pg_sz);
437 /* For Instruction queues */
438 len += chunks * RTE_ALIGN(chunk_size, 128);
440 /* Wastage after instruction queues */
441 len = RTE_ALIGN(len, pg_sz);
443 rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
444 RTE_MEMZONE_SIZE_HINT_ONLY |
446 RTE_CACHE_LINE_SIZE);
453 dma_addr = rz->phys_addr;
458 cpt_instance->rsvd = (uintptr_t)rz;
460 /* Pending queue setup */
461 cptvf->pqueue.rid_queue = (struct rid *)mem;
462 cptvf->pqueue.enq_tail = 0;
463 cptvf->pqueue.deq_head = 0;
464 cptvf->pqueue.pending_count = 0;
466 mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
467 len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
468 dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);
470 /* Alignment wastage */
471 used_len = alloc_len - len;
472 mem += RTE_ALIGN(used_len, pg_sz) - used_len;
473 len -= RTE_ALIGN(used_len, pg_sz) - used_len;
474 dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;
476 /* Init instruction queues */
477 chunk_head = &cptvf->cqueue.chead[0];
481 for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
484 chunk = &cptvf->cqueue.chead[i];
486 chunk->dma_addr = dma_addr;
488 csize = RTE_ALIGN(chunk_size, 128);
494 next_ptr = (uint64_t *)(chunk_prev->head +
496 *next_ptr = (uint64_t)chunk->dma_addr;
501 next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
502 *next_ptr = (uint64_t)chunk_head->dma_addr;
506 /* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
507 cptvf->qsize = chunk_size / 8;
508 cptvf->cqueue.qhead = chunk_head->head;
509 cptvf->cqueue.idx = 0;
510 cptvf->cqueue.cchunk = 0;
512 if (cpt_vq_init(cptvf, group)) {
513 CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
519 *instance = cpt_instance;
521 CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);
525 rte_memzone_free(rz);
531 otx_cpt_put_resource(struct cpt_instance *instance)
533 struct cpt_vf *cptvf = (struct cpt_vf *)instance;
534 struct rte_memzone *rz;
537 CPT_LOG_ERR("Invalid CPTVF handle");
541 CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);
543 rz = (struct rte_memzone *)instance->rsvd;
544 rte_memzone_free(rz);
549 otx_cpt_start_device(void *dev)
552 struct cpt_vf *cptvf = (struct cpt_vf *)dev;
554 rc = otx_cpt_send_vf_up(cptvf);
556 CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
557 cptvf->dev_name, rc);
561 if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) {
562 CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF "
563 "device %s", cptvf->vftype, cptvf->dev_name);
571 otx_cpt_stop_device(void *dev)
574 uint32_t pending, retries = 5;
575 struct cpt_vf *cptvf = (struct cpt_vf *)dev;
577 /* Wait for pending entries to complete */
578 pending = otx_cpt_read_vq_doorbell(cptvf);
580 CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
581 cptvf->dev_name, pending);
583 pending = otx_cpt_read_vq_doorbell(cptvf);
589 if (!retries && pending) {
590 CPT_LOG_ERR("%s: Timeout waiting for commands(%u)",
591 cptvf->dev_name, pending);
595 rc = otx_cpt_send_vf_down(cptvf);
597 CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
598 cptvf->dev_name, rc);