/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include <rte_bus_pci.h>
#include <rte_malloc.h>

#include "otx2_ethdev.h"
static void
nix_lf_err_irq(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t intr;

	intr = otx2_read64(dev->base + NIX_LF_ERR_INT);
	if (intr == 0)
		return;

	otx2_err("Err_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

	/* Clear interrupt (write-1-to-clear) */
	otx2_write64(intr, dev->base + NIX_LF_ERR_INT);

	/* Dump registers to stdout */
	otx2_nix_reg_dump(dev, NULL);
	otx2_nix_queues_ctx_dump(eth_dev);
}
static int
nix_lf_register_err_irq(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc, vec;

	vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;

	/* Clear err interrupt */
	otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
	/* Register err interrupt vector */
	rc = otx2_register_irq(handle, nix_lf_err_irq, eth_dev, vec);
	/* Enable all dev interrupts except for RQ_DISABLED */
	otx2_write64(~BIT_ULL(11), dev->base + NIX_LF_ERR_INT_ENA_W1S);

	return rc;
}
static void
nix_lf_unregister_err_irq(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int vec;

	vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;

	/* Clear err interrupt */
	otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
	otx2_unregister_irq(handle, nix_lf_err_irq, eth_dev, vec);
}
static void
nix_lf_ras_irq(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t intr;

	intr = otx2_read64(dev->base + NIX_LF_RAS);
	if (intr == 0)
		return;

	otx2_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

	/* Clear interrupt (write-1-to-clear) */
	otx2_write64(intr, dev->base + NIX_LF_RAS);

	/* Dump registers to stdout */
	otx2_nix_reg_dump(dev, NULL);
	otx2_nix_queues_ctx_dump(eth_dev);
}
static int
nix_lf_register_ras_irq(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc, vec;

	vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;

	/* Clear RAS interrupt */
	otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
	/* Register RAS interrupt vector */
	rc = otx2_register_irq(handle, nix_lf_ras_irq, eth_dev, vec);
	/* Enable RAS interrupt */
	otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1S);

	return rc;
}
static void
nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int vec;

	vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;

	/* Clear RAS interrupt */
	otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
	otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec);
}
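/* Fetch-and-clear a queue's interrupt status through its NIX_LF_*_OP_INT
 * register: an atomic op with the queue index placed at bit 44 returns the
 * latched status (bit 42, OP_ERR, flags a bad request), and writing the
 * returned status bits back clears them (write-1-to-clear).
 */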
static inline uint8_t
nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q,
			   uint32_t off, uint64_t mask)
{
	uint64_t reg, wdata;
	uint8_t qint;

	wdata = (uint64_t)q << 44;
	reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off));

	if (reg & BIT_ULL(42) /* OP_ERR */) {
		otx2_err("Failed to execute irq get off=0x%x", off);
		return 0;
	}

	qint = reg & 0xff;
	wdata &= mask;
	/* Write the status bits back to clear the latched interrupts */
	otx2_write64(wdata | qint, dev->base + off);

	return qint;
}
static inline uint8_t
nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq)
{
	return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00);
}

static inline uint8_t
nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq)
{
	return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00);
}

static inline uint8_t
nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq)
{
	return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
}
static inline void
nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off)
{
	uint64_t reg;

	reg = otx2_read64(dev->base + off);
	if (reg & BIT_ULL(44))
		otx2_err("SQ=%d err_code=0x%x",
			 (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
}
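/* CQ completion interrupt handler: only acknowledge the interrupt here;
 * the application is expected to wake up on the queue's interrupt fd and
 * poll the CQ itself.
 */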
static void
nix_lf_cq_irq(void *param)
{
	struct otx2_qint *cint = (struct otx2_qint *)param;
	struct rte_eth_dev *eth_dev = cint->eth_dev;
	struct otx2_eth_dev *dev;

	dev = otx2_eth_pmd_priv(eth_dev);
	/* Clear interrupt */
	otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_INT(cint->qintx));
}
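/* QINT error interrupt handler: one QINT vector aggregates error events
 * from all RQs, CQs and SQs mapped to it, so scan every queue, log and
 * clear whatever is pending, then dump registers and queue contexts for
 * post-mortem analysis.
 */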
static void
nix_lf_q_irq(void *param)
{
	struct otx2_qint *qint = (struct otx2_qint *)param;
	struct rte_eth_dev *eth_dev = qint->eth_dev;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint8_t irq, qintx = qint->qintx;
	int q, cq, rq, sq;
	uint64_t intr;

	intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx));
	if (intr == 0)
		return;

	otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d",
		 intr, qintx, dev->pf, dev->vf);

	/* Handle RQ interrupts */
	for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
		rq = q % dev->qints;
		irq = nix_lf_rq_irq_get_and_clear(dev, rq);

		if (irq & BIT_ULL(NIX_RQINT_DROP))
			otx2_err("RQ=%d NIX_RQINT_DROP", rq);

		if (irq & BIT_ULL(NIX_RQINT_RED))
			otx2_err("RQ=%d NIX_RQINT_RED", rq);
	}

	/* Handle CQ interrupts */
	for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
		cq = q % dev->qints;
		irq = nix_lf_cq_irq_get_and_clear(dev, cq);

		if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
			otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
			otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);

		if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
			otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
	}

	/* Handle SQ interrupts */
	for (q = 0; q < eth_dev->data->nb_tx_queues; q++) {
		sq = q % dev->qints;
		irq = nix_lf_sq_irq_get_and_clear(dev, sq);

		if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
			otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG);
		}
		if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
			otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG);
		}
		if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
			otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
		}
		if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
			otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
			nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
		}
	}

	/* Clear interrupt (write-1-to-clear) */
	otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx));

	/* Dump registers to stdout */
	otx2_nix_reg_dump(dev, NULL);
	otx2_nix_queues_ctx_dump(eth_dev);
}
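/* Register one MSI-X vector per QINT. The number of hardware QINTs may be
 * smaller than the number of configured queues, in which case several
 * queues share a QINT (hence the q % dev->qints mapping in the handler).
 */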
int
oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int vec, q, sqs, rqs, qs, rc = 0;

	/* Figure out max qintx required */
	rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues);
	sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues);
	qs = RTE_MAX(rqs, sqs);

	dev->configured_qints = qs;

	for (q = 0; q < qs; q++) {
		vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;

		/* Clear QINT CNT */
		otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));

		/* Disable QINT interrupts */
		otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));

		dev->qints_mem[q].eth_dev = eth_dev;
		dev->qints_mem[q].qintx = q;

		/* Sync qints_mem update */
		rte_smp_wmb();

		/* Register queue irq vector */
		rc = otx2_register_irq(handle, nix_lf_q_irq,
				       &dev->qints_mem[q], vec);
		if (rc)
			break;

		otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
		otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
		/* Enable QINT interrupt */
		otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q));
	}

	return rc;
}
void
oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int vec, q;

	for (q = 0; q < dev->configured_qints; q++) {
		vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;

		/* Clear QINT CNT */
		otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
		otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));

		/* Disable QINT interrupts */
		otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));

		/* Unregister queue irq vector */
		otx2_unregister_irq(handle, nix_lf_q_irq,
				    &dev->qints_mem[q], vec);
	}
}
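/* Register one MSI-X vector per Rx queue for completion (CINT) interrupts
 * and publish the vector mapping in handle->intr_vec so that the EAL
 * interrupt/epoll plumbing can associate each Rx queue with its fd.
 */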
int
oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint8_t vec, q;
	int rc = 0;

	dev->configured_cints = RTE_MIN(dev->cints,
					eth_dev->data->nb_rx_queues);

	for (q = 0; q < dev->configured_cints; q++) {
		vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;

		/* Clear CINT CNT */
		otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));

		/* Disable CINT interrupt */
		otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));

		dev->cints_mem[q].eth_dev = eth_dev;
		dev->cints_mem[q].qintx = q;

		/* Sync cints_mem update */
		rte_smp_wmb();

		/* Register CQ irq vector */
		rc = otx2_register_irq(handle, nix_lf_cq_irq,
				       &dev->cints_mem[q], vec);
		if (rc) {
			otx2_err("Failed to register CQ irq, rc=%d", rc);
			return rc;
		}

		if (!handle->intr_vec) {
			handle->intr_vec = rte_zmalloc("intr_vec",
						       dev->configured_cints *
						       sizeof(int), 0);
			if (!handle->intr_vec) {
				otx2_err("Failed to allocate %d rx intr_vec",
					 dev->configured_cints);
				return -ENOMEM;
			}
		}
		/* VFIO vector zero is reserved for misc interrupt so
		 * doing required adjustment. (b13bfab4cd)
		 */
		handle->intr_vec[q] = RTE_INTR_VEC_RXTX_OFFSET + vec;

		/* Configure CQE interrupt coalescing parameters */
		otx2_write64(((CQ_CQE_THRESH_DEFAULT) |
			      (CQ_CQE_THRESH_DEFAULT << 32) |
			      (CQ_TIMER_THRESH_DEFAULT << 48)),
			     dev->base + NIX_LF_CINTX_WAIT(q));

		/* Keep the CQ interrupt disabled here: the Rx interrupt
		 * feature is enabled/disabled on demand via
		 * otx2_nix_rx_queue_intr_enable()/_disable().
		 */
	}

	return rc;
}
void
oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int vec, q;

	for (q = 0; q < dev->configured_cints; q++) {
		vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;

		/* Clear CINT CNT */
		otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));

		/* Disable CINT interrupt */
		otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));

		/* Unregister CQ irq vector */
		otx2_unregister_irq(handle, nix_lf_cq_irq,
				    &dev->cints_mem[q], vec);
	}
}
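/* Device-level interrupt registration (ERR_INT and RAS only); the
 * per-queue QINT and CINT vectors are registered separately once the
 * queue counts are known.
 */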
int
otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc;

	if (dev->nix_msixoff == MSIX_VECTOR_INVALID) {
		otx2_err("Invalid NIXLF MSIX vector offset: 0x%x",
			 dev->nix_msixoff);
		return -EINVAL;
	}

	/* Register lf err interrupt */
	rc = nix_lf_register_err_irq(eth_dev);
	/* Register RAS interrupt */
	rc |= nix_lf_register_ras_irq(eth_dev);

	return rc;
}
void
otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev)
{
	nix_lf_unregister_err_irq(eth_dev);
	nix_lf_unregister_ras_irq(eth_dev);
}
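/* Callbacks wired to the ethdev rx_queue_intr_enable/disable dev_ops,
 * backing the rte_eth_dev_rx_intr_enable()/rte_eth_dev_rx_intr_disable()
 * API.
 */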
int
otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
			      uint16_t rx_queue_id)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* Enable CINT interrupt */
	otx2_write64(BIT_ULL(0), dev->base +
		     NIX_LF_CINTX_ENA_W1S(rx_queue_id));

	return 0;
}
int
otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
			       uint16_t rx_queue_id)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* Clear and disable CINT interrupt */
	otx2_write64(BIT_ULL(0), dev->base +
		     NIX_LF_CINTX_ENA_W1C(rx_queue_id));

	return 0;
}
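/* Application-side usage sketch (not part of this driver): assuming the
 * port was configured with intr_conf.rxq = 1, an application would
 * typically do something like
 *
 *	rte_eth_dev_rx_intr_ctl_q(port, queue, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port, queue);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, n, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port, queue);
 *
 * and then fall back to polling rte_eth_rx_burst() on wakeup.
 */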