1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Enable or disable all NIX LF error interrupts for this LF.
 * NOTE(review): this chunk is elided -- the if/else selecting between the
 * W1S (enable) and W1C (disable) write based on `enb` is not visible here;
 * confirm against the full source.
 */
9 nix_err_intr_enb_dis(struct nix *nix, bool enb)
11 /* Enable all nix lf error irqs except RQ_DISABLED and CQ_DISABLED */
/* Bits 11 and 24 are masked out of the enable-set write. */
13 plt_write64(~(BIT_ULL(11) | BIT_ULL(24)),
14 nix->base + NIX_LF_ERR_INT_ENA_W1S);
/* Disable path: clear every error-interrupt enable bit via W1C. */
16 plt_write64(~0ull, nix->base + NIX_LF_ERR_INT_ENA_W1C);
/* Enable or disable all NIX LF RAS (poison) interrupts.
 * NOTE(review): the if/else on `enb` between the two writes is elided in
 * this chunk -- confirm against the full source.
 */
20 nix_ras_intr_enb_dis(struct nix *nix, bool enb)
/* Enable: set every RAS enable bit. */
23 plt_write64(~0ull, nix->base + NIX_LF_RAS_ENA_W1S);
/* Disable: clear every RAS enable bit. */
25 plt_write64(~0ull, nix->base + NIX_LF_RAS_ENA_W1C);
/* Public API: enable the completion interrupt (CINT) associated with
 * @rx_queue_id on the given roc_nix.
 */
29 roc_nix_rx_queue_intr_enable(struct roc_nix *roc_nix, uint16_t rx_queue_id)
31 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
33 /* Enable CINT interrupt */
34 plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1S(rx_queue_id));
/* Public API: disable the completion interrupt (CINT) associated with
 * @rx_queue_id on the given roc_nix. Counterpart of
 * roc_nix_rx_queue_intr_enable().
 */
38 roc_nix_rx_queue_intr_disable(struct roc_nix *roc_nix, uint16_t rx_queue_id)
40 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
42 /* Clear and disable CINT interrupt */
43 plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(rx_queue_id));
/* Public API wrapper: enable/disable NIX LF error interrupts; forwards to
 * the internal nix_err_intr_enb_dis() on the private nix.
 */
47 roc_nix_err_intr_ena_dis(struct roc_nix *roc_nix, bool enb)
49 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
51 return nix_err_intr_enb_dis(nix, enb);
/* Public API wrapper: enable/disable NIX LF RAS interrupts; forwards to
 * the internal nix_ras_intr_enb_dis() on the private nix.
 */
55 roc_nix_ras_intr_ena_dis(struct roc_nix *roc_nix, bool enb)
57 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
59 return nix_ras_intr_enb_dis(nix, enb);
/* IRQ handler for NIX LF error interrupts: reads the pending error bits,
 * logs them, acks them by writing the value back, and dumps LF registers
 * and queue contexts for debugging.
 * NOTE(review): lines are elided here (e.g. the declaration of `intr` and
 * any early-return on intr == 0) -- confirm against the full source.
 */
63 nix_lf_err_irq(void *param)
65 struct nix *nix = (struct nix *)param;
66 struct dev *dev = &nix->dev;
/* Read currently pending error interrupt bits. */
69 intr = plt_read64(nix->base + NIX_LF_ERR_INT);
73 plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
/* Ack: write the pending bits back to clear them. */
76 plt_write64(intr, nix->base + NIX_LF_ERR_INT);
77 /* Dump registers to std out */
78 roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
79 roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
/* Register nix_lf_err_irq() on the LF's ERR_INT MSI-X vector:
 * disable error interrupts, hook the handler, then re-enable them.
 * NOTE(review): declarations of `vec`/`rc` and the trailing `return rc`
 * are elided in this chunk -- confirm against the full source.
 */
83 nix_lf_register_err_irq(struct nix *nix)
85 struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
88 vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
89 /* Clear err interrupt */
90 nix_err_intr_enb_dis(nix, false);
91 /* Set used interrupt vectors */
92 rc = dev_irq_register(handle, nix_lf_err_irq, nix, vec);
/* Enable all dev error interrupts except RQ_DISABLED and CQ_DISABLED
 * (both bits are masked out by nix_err_intr_enb_dis()'s enable write).
 */
94 nix_err_intr_enb_dis(nix, true);
/* Unregister the LF error IRQ handler: disable error interrupts first,
 * then detach nix_lf_err_irq() from its MSI-X vector.
 */
100 nix_lf_unregister_err_irq(struct nix *nix)
102 struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
105 vec = nix->msixoff + NIX_LF_INT_VEC_ERR_INT;
106 /* Clear err interrupt */
107 nix_err_intr_enb_dis(nix, false);
108 dev_irq_unregister(handle, nix_lf_err_irq, nix, vec);
/* IRQ handler for NIX LF RAS (poison) interrupts: reads pending bits,
 * logs, acks by write-back, and dumps LF registers and queue contexts.
 * NOTE(review): the declaration of `intr` and any early-return on zero
 * are elided in this chunk -- confirm against the full source.
 */
112 nix_lf_ras_irq(void *param)
114 struct nix *nix = (struct nix *)param;
115 struct dev *dev = &nix->dev;
/* Read pending RAS interrupt bits. */
118 intr = plt_read64(nix->base + NIX_LF_RAS);
122 plt_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
123 /* Clear interrupt */
124 plt_write64(intr, nix->base + NIX_LF_RAS);
126 /* Dump registers to std out */
127 roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
128 roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
/* Register nix_lf_ras_irq() on the LF's POISON MSI-X vector:
 * disable RAS interrupts, hook the handler, then re-enable them.
 * NOTE(review): declarations of `vec`/`rc` and the trailing `return rc`
 * are elided in this chunk -- confirm against the full source.
 */
132 nix_lf_register_ras_irq(struct nix *nix)
134 struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
137 vec = nix->msixoff + NIX_LF_INT_VEC_POISON;
/* Disable RAS interrupts before installing the handler. */
139 nix_ras_intr_enb_dis(nix, false);
140 /* Set used interrupt vectors */
141 rc = dev_irq_register(handle, nix_lf_ras_irq, nix, vec);
142 /* Enable dev interrupt */
143 nix_ras_intr_enb_dis(nix, true);
/* Unregister the LF RAS IRQ handler: disable RAS interrupts first, then
 * detach nix_lf_ras_irq() from its MSI-X vector.
 */
149 nix_lf_unregister_ras_irq(struct nix *nix)
151 struct plt_intr_handle *handle = nix->pci_dev->intr_handle;
154 vec = nix->msixoff + NIX_LF_INT_VEC_POISON;
/* Disable RAS interrupts before removing the handler. */
156 nix_ras_intr_enb_dis(nix, false);
157 dev_irq_unregister(handle, nix_lf_ras_irq, nix, vec);
/* Common helper: atomically fetch a queue's interrupt status via the
 * NIX_LF_*_OP_INT mailbox register at @off, then clear the reported bits
 * by writing them back.
 * NOTE(review): the signature continues on an elided line (a mask
 * parameter, per the ~0xff00 / ~0x1ff00 arguments at the call sites), and
 * the declarations of `wdata`/`reg`/`qint` plus the return statements are
 * not visible here -- confirm against the full source.
 */
160 static inline uint8_t
161 nix_lf_q_irq_get_and_clear(struct nix *nix, uint16_t q, uint32_t off,
/* Queue index is encoded in bits [63:44] of the op word. */
167 wdata = (uint64_t)q << 44;
168 reg = roc_atomic64_add_nosync(wdata, (int64_t *)(nix->base + off));
/* Bit 42 set in the response indicates the op failed. */
170 if (reg & BIT_ULL(42) /* OP_ERR */) {
171 plt_err("Failed execute irq get off=0x%x", off);
/* Write the reported interrupt bits back to clear them. */
176 plt_write64(wdata | qint, nix->base + off);
/* Fetch-and-clear the interrupt status of RQ @rq via NIX_LF_RQ_OP_INT. */
181 static inline uint8_t
182 nix_lf_rq_irq_get_and_clear(struct nix *nix, uint16_t rq)
184 return nix_lf_q_irq_get_and_clear(nix, rq, NIX_LF_RQ_OP_INT, ~0xff00);
/* Fetch-and-clear the interrupt status of CQ @cq via NIX_LF_CQ_OP_INT. */
187 static inline uint8_t
188 nix_lf_cq_irq_get_and_clear(struct nix *nix, uint16_t cq)
190 return nix_lf_q_irq_get_and_clear(nix, cq, NIX_LF_CQ_OP_INT, ~0xff00);
/* Fetch-and-clear the interrupt status of SQ @sq via NIX_LF_SQ_OP_INT.
 * Note the wider mask (~0x1ff00) compared to the RQ/CQ variants.
 */
193 static inline uint8_t
194 nix_lf_sq_irq_get_and_clear(struct nix *nix, uint16_t sq)
196 return nix_lf_q_irq_get_and_clear(nix, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
/* Check whether SQ @q has a NULL next-SQB pointer by reading its context,
 * using the CN9K or CN10K context layout as appropriate.
 * NOTE(review): several lines are elided (the `ctx`/`rc` declarations, the
 * rc-check branch, the full ternary, and the return of is_sqb_null) --
 * confirm against the full source.
 */
200 nix_lf_is_sqb_null(struct dev *dev, int q)
202 bool is_sqb_null = false;
/* Fetch the SQ context from the AQ mailbox. */
206 rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, &ctx);
208 plt_err("Failed to get sq context");
/* Context layout differs between CN9K and CN10K silicon. */
211 roc_model_is_cn9k() ?
212 (((__io struct nix_sq_ctx_s *)ctx)->next_sqb ==
214 (((__io struct nix_cn10k_sq_ctx_s *)ctx)
/* Read an SQ debug register at @off; if its valid bit (bit 44) is set,
 * clear it and (per the callers) report the captured error code.
 * NOTE(review): the `reg` declaration, the extraction of the returned
 * error code, and the return statements are elided here -- confirm
 * against the full source.
 */
221 static inline uint8_t
222 nix_lf_sq_debug_reg(struct nix *nix, uint32_t off)
227 reg = plt_read64(nix->base + off);
/* Bit 44 is the "entry valid" flag for these debug registers. */
228 if (reg & BIT_ULL(44)) {
230 /* Clear valid bit */
231 plt_write64(BIT_ULL(44), nix->base + off);
/* IRQ handler for a completion-queue interrupt (CINT): acks the interrupt
 * for the cint's queue index. Actual packet processing is done elsewhere
 * by the datapath; this handler only clears the interrupt.
 */
238 nix_lf_cq_irq(void *param)
240 struct nix_qint *cint = (struct nix_qint *)param;
241 struct nix *nix = cint->nix;
243 /* Clear interrupt */
244 plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_INT(cint->qintx));
/* IRQ handler for a queue interrupt (QINT): walks every RX queue (RQ and
 * CQ) and TX queue (SQ), fetches-and-clears each queue's interrupt
 * status, logs any error condition found, then acks the QINT and dumps
 * LF registers/contexts for debugging.
 * NOTE(review): lines are elided throughout (declarations of
 * `intr`/`q`/`rq`/`cq`/`sq`/`rc`, the early-return on intr == 0, and the
 * per-iteration rq/cq/sq assignments) -- confirm against the full source.
 */
248 nix_lf_q_irq(void *param)
250 struct nix_qint *qint = (struct nix_qint *)param;
251 uint8_t irq, qintx = qint->qintx;
252 struct nix *nix = qint->nix;
253 struct dev *dev = &nix->dev;
/* Read pending bits for this QINT. */
258 intr = plt_read64(nix->base + NIX_LF_QINTX_INT(qintx));
262 plt_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d", intr, qintx,
265 /* Handle RQ interrupts */
266 for (q = 0; q < nix->nb_rx_queues; q++) {
268 irq = nix_lf_rq_irq_get_and_clear(nix, rq);
270 if (irq & BIT_ULL(NIX_RQINT_DROP))
271 plt_err("RQ=%d NIX_RQINT_DROP", rq);
273 if (irq & BIT_ULL(NIX_RQINT_RED))
274 plt_err("RQ=%d NIX_RQINT_RED", rq);
277 /* Handle CQ interrupts */
278 for (q = 0; q < nix->nb_rx_queues; q++) {
280 irq = nix_lf_cq_irq_get_and_clear(nix, cq);
282 if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
283 plt_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);
285 if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
286 plt_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);
288 if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
289 plt_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
292 /* Handle SQ interrupts */
293 for (q = 0; q < nix->nb_tx_queues; q++) {
295 irq = nix_lf_sq_irq_get_and_clear(nix, sq);
297 /* Detect LMT store error */
298 rc = nix_lf_sq_debug_reg(nix, NIX_LF_SQ_OP_ERR_DBG);
300 plt_err("SQ=%d NIX_SQINT_LMT_ERR, errcode %x", sq, rc);
302 /* Detect Meta-descriptor enqueue error */
303 rc = nix_lf_sq_debug_reg(nix, NIX_LF_MNQ_ERR_DBG);
305 plt_err("SQ=%d NIX_SQINT_MNQ_ERR, errcode %x", sq, rc);
307 /* Detect Send error */
308 rc = nix_lf_sq_debug_reg(nix, NIX_LF_SEND_ERR_DBG);
310 plt_err("SQ=%d NIX_SQINT_SEND_ERR, errcode %x", sq, rc);
312 /* Detect SQB fault, read SQ context to check SQB NULL case */
313 if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL) ||
314 nix_lf_is_sqb_null(dev, q))
315 plt_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
/* Ack this QINT by writing the pending bits back. */
319 plt_write64(intr, nix->base + NIX_LF_QINTX_INT(qintx));
321 /* Dump registers to std out */
322 roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
323 roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
/* Public API: allocate per-QINT state, register nix_lf_q_irq() on one
 * MSI-X vector per configured QINT, and enable the QINT interrupts.
 * The number of QINTs is the larger of the RQ-limited and SQ-limited
 * counts, each capped at the hardware qints available.
 * NOTE(review): several lines are elided (the `struct nix *nix`
 * declaration, the error-return paths including the NULL check's return
 * value, the memory barrier after qints_mem update, the vector argument
 * to dev_irq_register, and the final return) -- confirm against the full
 * source.
 */
327 roc_nix_register_queue_irqs(struct roc_nix *roc_nix)
329 int vec, q, sqs, rqs, qs, rc = 0;
330 struct plt_intr_handle *handle;
333 nix = roc_nix_to_nix_priv(roc_nix);
334 handle = nix->pci_dev->intr_handle;
336 /* Figure out max qintx required */
337 rqs = PLT_MIN(nix->qints, nix->nb_rx_queues);
338 sqs = PLT_MIN(nix->qints, nix->nb_tx_queues);
339 qs = PLT_MAX(rqs, sqs);
341 nix->configured_qints = qs;
/* Zeroed array of nix_qint, one per configured QINT. */
344 plt_zmalloc(nix->configured_qints * sizeof(struct nix_qint), 0);
345 if (nix->qints_mem == NULL)
348 for (q = 0; q < qs; q++) {
349 vec = nix->msixoff + NIX_LF_INT_VEC_QINT_START + q;
/* Reset the QINT counter before enabling. */
352 plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
354 /* Clear interrupt */
355 plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1C(q));
357 nix->qints_mem[q].nix = nix;
358 nix->qints_mem[q].qintx = q;
360 /* Sync qints_mem update */
363 /* Register queue irq vector */
364 rc = dev_irq_register(handle, nix_lf_q_irq, &nix->qints_mem[q],
/* Re-zero count and pending bits, then enable this QINT. */
369 plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
370 plt_write64(0, nix->base + NIX_LF_QINTX_INT(q));
371 /* Enable QINT interrupt */
372 plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1S(q));
/* Public API: disable every configured QINT, unregister its IRQ handler,
 * and free the per-QINT state allocated by roc_nix_register_queue_irqs().
 * NOTE(review): the `struct nix *nix` / `vec` / `q` declarations and the
 * vector argument to dev_irq_unregister are elided here -- confirm
 * against the full source.
 */
379 roc_nix_unregister_queue_irqs(struct roc_nix *roc_nix)
381 struct plt_intr_handle *handle;
385 nix = roc_nix_to_nix_priv(roc_nix);
386 handle = nix->pci_dev->intr_handle;
388 for (q = 0; q < nix->configured_qints; q++) {
389 vec = nix->msixoff + NIX_LF_INT_VEC_QINT_START + q;
/* Reset the QINT counter and pending bits. */
392 plt_write64(0, nix->base + NIX_LF_QINTX_CNT(q));
393 plt_write64(0, nix->base + NIX_LF_QINTX_INT(q));
395 /* Clear interrupt */
396 plt_write64(~0ull, nix->base + NIX_LF_QINTX_ENA_W1C(q));
398 /* Unregister queue irq vector */
399 dev_irq_unregister(handle, nix_lf_q_irq, &nix->qints_mem[q],
402 nix->configured_qints = 0;
/* Release per-QINT state; pointer reset guards against reuse. */
404 plt_free(nix->qints_mem);
405 nix->qints_mem = NULL;
/* Public API: allocate per-CINT state, register nix_lf_cq_irq() on one
 * MSI-X vector per configured CINT, populate the interrupt vector list
 * used by the application's RX-interrupt (epoll) path, and program CQE
 * interrupt coalescing. CINTs are left disabled; they are turned on per
 * queue via roc_nix_rx_queue_intr_enable().
 * NOTE(review): many lines are elided (the `struct nix *nix` declaration,
 * error-return paths, the memory barrier after cints_mem update, the
 * vector argument to dev_irq_register, and the final return) -- confirm
 * against the full source. Also note `rc` is declared uint8_t here while
 * the error logs print it with %d; verify the full source's declaration.
 */
409 roc_nix_register_cq_irqs(struct roc_nix *roc_nix)
411 struct plt_intr_handle *handle;
412 uint8_t rc = 0, vec, q;
415 nix = roc_nix_to_nix_priv(roc_nix);
416 handle = nix->pci_dev->intr_handle;
/* One CINT per RX queue, capped at the hardware cints available. */
418 nix->configured_cints = PLT_MIN(nix->cints, nix->nb_rx_queues);
421 plt_zmalloc(nix->configured_cints * sizeof(struct nix_qint), 0);
422 if (nix->cints_mem == NULL)
425 for (q = 0; q < nix->configured_cints; q++) {
426 vec = nix->msixoff + NIX_LF_INT_VEC_CINT_START + q;
/* Reset the CINT counter. */
429 plt_write64(0, nix->base + NIX_LF_CINTX_CNT(q));
431 /* Clear interrupt */
432 plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(q));
434 nix->cints_mem[q].nix = nix;
435 nix->cints_mem[q].qintx = q;
437 /* Sync cints_mem update */
440 /* Register queue irq vector */
441 rc = dev_irq_register(handle, nix_lf_cq_irq, &nix->cints_mem[q],
444 plt_err("Fail to register CQ irq, rc=%d", rc);
448 rc = plt_intr_vec_list_alloc(handle, "cnxk",
449 nix->configured_cints);
451 plt_err("Fail to allocate intr vec list, rc=%d",
455 /* VFIO vector zero is reserved for misc interrupt so
456 * doing required adjustment. (b13bfab4cd)
458 if (plt_intr_vec_list_index_set(handle, q,
459 PLT_INTR_VEC_RXTX_OFFSET + vec))
462 /* Configure CQE interrupt coalescing parameters */
463 plt_write64(((CQ_CQE_THRESH_DEFAULT) |
464 (CQ_CQE_THRESH_DEFAULT << 32) |
465 (CQ_TIMER_THRESH_DEFAULT << 48),
466 nix->base + NIX_LF_CINTX_WAIT((q)));
468 /* Keeping the CQ interrupt disabled as the rx interrupt
469 * feature needs to be enabled/disabled on demand.
/* Public API: disable every configured CINT, unregister its IRQ handler,
 * free the interrupt vector list, and free the per-CINT state allocated
 * by roc_nix_register_cq_irqs().
 * NOTE(review): the `struct nix *nix` / `vec` / `q` declarations and the
 * vector argument to dev_irq_unregister are elided here; unlike the QINT
 * path, no visible line resets cints_mem to NULL after plt_free --
 * confirm against the full source.
 */
477 roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix)
479 struct plt_intr_handle *handle;
483 nix = roc_nix_to_nix_priv(roc_nix);
484 handle = nix->pci_dev->intr_handle;
486 for (q = 0; q < nix->configured_cints; q++) {
487 vec = nix->msixoff + NIX_LF_INT_VEC_CINT_START + q;
/* Reset the CINT counter. */
490 plt_write64(0, nix->base + NIX_LF_CINTX_CNT(q));
492 /* Clear interrupt */
493 plt_write64(BIT_ULL(0), nix->base + NIX_LF_CINTX_ENA_W1C(q));
495 /* Unregister queue irq vector */
496 dev_irq_unregister(handle, nix_lf_cq_irq, &nix->cints_mem[q],
500 plt_intr_vec_list_free(handle);
501 plt_free(nix->cints_mem);
/* Register the always-on LF interrupts (error + RAS) after validating the
 * LF's MSI-X offset. Queue/CQ interrupts are registered separately.
 * NOTE(review): the `rc` declaration and final return are elided;
 * rc |= combines the two registration results, so a nonzero value may
 * mix error codes -- confirm intent against the full source.
 */
505 nix_register_irqs(struct nix *nix)
/* An invalid MSI-X offset means vectors were never provisioned. */
509 if (nix->msixoff == MSIX_VECTOR_INVALID) {
510 plt_err("Invalid NIXLF MSIX vector offset vector: 0x%x",
512 return NIX_ERR_PARAM;
515 /* Register lf err interrupt */
516 rc = nix_lf_register_err_irq(nix);
517 /* Register RAS interrupt */
518 rc |= nix_lf_register_ras_irq(nix);
/* Counterpart of nix_register_irqs(): tear down the error and RAS
 * interrupt handlers.
 */
524 nix_unregister_irqs(struct nix *nix)
526 nix_lf_unregister_err_irq(nix);
527 nix_lf_unregister_ras_irq(nix);