1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_bus_pci.h>
9 #include "otx2_ethdev.h"
/* Interrupt handler for the NIX LF error line (NIX_LF_ERR_INT).
 * Logs the pending cause bits, acknowledges them by writing the same
 * value back to the register (presumably write-1-to-clear semantics —
 * confirm against the OCTEON TX2 HRM), then dumps queue contexts for
 * post-mortem debugging.
 * NOTE(review): local declarations/braces are elided in this view of
 * the file; only the visible statements are documented here.
 */
12 nix_lf_err_irq(void *param)
14 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
15 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* Snapshot the pending error-interrupt cause bits */
18 intr = otx2_read64(dev->base + NIX_LF_ERR_INT);
22 otx2_err("Err_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
/* Acknowledge exactly the causes we observed */
25 otx2_write64(intr, dev->base + NIX_LF_ERR_INT);
/* Dump RQ/CQ/SQ contexts to aid diagnosis of the error */
27 otx2_nix_queues_ctx_dump(eth_dev);
/* Register the NIX LF error interrupt handler on the PCI device's
 * MSI-X vector (nix_msixoff + NIX_LF_INT_VEC_ERR_INT) and enable the
 * error-interrupt sources.  Returns the rc from otx2_register_irq()
 * (return statement elided in this view — verify in full file).
 */
31 nix_lf_register_err_irq(struct rte_eth_dev *eth_dev)
33 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
34 struct rte_intr_handle *handle = &pci_dev->intr_handle;
35 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* MSI-X vector for this LF's ERR_INT line */
38 vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;
40 /* Disable all err interrupt sources before registering (W1C enable) */
41 otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
42 /* Set used interrupt vectors */
43 rc = otx2_register_irq(handle, nix_lf_err_irq, eth_dev, vec);
44 /* Enable all dev interrupts except RQ_DISABLED (bit 11) */
45 otx2_write64(~BIT_ULL(11), dev->base + NIX_LF_ERR_INT_ENA_W1S);
51 nix_lf_unregister_err_irq(struct rte_eth_dev *eth_dev)
53 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
54 struct rte_intr_handle *handle = &pci_dev->intr_handle;
55 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
58 vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;
60 /* Clear err interrupt */
61 otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
62 otx2_unregister_irq(handle, nix_lf_err_irq, eth_dev, vec);
/* Interrupt handler for the NIX LF RAS (poison/error-containment)
 * line.  Same pattern as nix_lf_err_irq(): log the pending causes,
 * write them back to acknowledge (presumably W1C — confirm against
 * HRM), and dump queue contexts.
 * NOTE(review): local declarations/braces elided in this view.
 */
66 nix_lf_ras_irq(void *param)
68 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
69 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* Snapshot the pending RAS cause bits */
72 intr = otx2_read64(dev->base + NIX_LF_RAS);
76 otx2_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
/* Acknowledge exactly the causes we observed */
79 otx2_write64(intr, dev->base + NIX_LF_RAS);
/* Dump RQ/CQ/SQ contexts to aid diagnosis */
81 otx2_nix_queues_ctx_dump(eth_dev);
/* Register the NIX LF RAS (poison) interrupt handler on MSI-X vector
 * nix_msixoff + NIX_LF_INT_VEC_POISON and enable all RAS sources.
 * Returns the rc from otx2_register_irq() (return elided in this view).
 */
85 nix_lf_register_ras_irq(struct rte_eth_dev *eth_dev)
87 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
88 struct rte_intr_handle *handle = &pci_dev->intr_handle;
89 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* MSI-X vector for this LF's poison/RAS line */
92 vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;
94 /* Disable all RAS interrupt sources before registering (W1C enable) */
95 otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
96 /* Set used interrupt vectors */
97 rc = otx2_register_irq(handle, nix_lf_ras_irq, eth_dev, vec);
98 /* Enable all RAS interrupt sources */
99 otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1S);
/* Mirror of nix_lf_register_ras_irq(): disable all RAS enables, then
 * unregister the handler from the same MSI-X vector.
 */
105 nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev)
107 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
108 struct rte_intr_handle *handle = &pci_dev->intr_handle;
109 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* Must match the vector used at registration time */
112 vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;
114 /* Disable all RAS interrupt sources (W1C enable register) */
115 otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
116 otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec);
/* Read-and-clear helper for a per-queue interrupt register (RQ/CQ/SQ
 * OP_INT).  Issues an atomic add with the queue number encoded in
 * bits 63:44 of the operand to fetch the register, checks the
 * hardware OP_ERR flag (bit 42), then writes back through the same
 * window to clear the serviced bits.  `mask` selects which bits the
 * caller wants cleared.  Returns the low interrupt byte, or 0 on
 * OP_ERR (return/masking statements elided in this view — verify in
 * full file).
 */
119 static inline uint8_t
120 nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q,
121 uint32_t off, uint64_t mask)
/* Queue index goes in bits 63:44 of the op word — per HRM op format,
 * TODO confirm */
126 wdata = (uint64_t)q << 44;
/* Atomic add returns the current register contents for queue q */
127 reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off));
129 if (reg & BIT_ULL(42) /* OP_ERR */) {
130 otx2_err("Failed execute irq get off=0x%x", off);
/* Write back to clear the interrupt bits selected by the caller */
136 otx2_write64(wdata, dev->base + off);
/* Fetch and clear the pending RQ interrupt bits for receive queue `rq`
 * via NIX_LF_RQ_OP_INT; mask ~0xff00 preserves the interrupt field on
 * write-back (exact field layout per HRM — TODO confirm).
 */
141 static inline uint8_t
142 nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq)
144 return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00);
/* Fetch and clear the pending CQ interrupt bits for completion queue
 * `cq` via NIX_LF_CQ_OP_INT; same masking convention as the RQ variant.
 */
147 static inline uint8_t
148 nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq)
150 return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00);
/* Fetch and clear the pending SQ interrupt bits for send queue `sq`
 * via NIX_LF_SQ_OP_INT.  Note the wider mask (~0x1ff00) than RQ/CQ —
 * the SQ int field spans an extra bit per the register layout
 * (TODO confirm against HRM).
 */
153 static inline uint8_t
154 nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq)
156 return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
/* Read an SQ error-debug register at `off` and, if the valid flag
 * (bit 44) is set, log the SQ index (bits 27:8) and error code
 * (bits 7:0).  Bit-field meanings inferred from the shifts below —
 * confirm against the NIX debug register layout in the HRM.
 */
160 nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off)
164 reg = otx2_read64(dev->base + off);
/* Bit 44 presumably marks the debug capture as valid */
165 if (reg & BIT_ULL(44))
166 otx2_err("SQ=%d err_code=0x%x",
167 (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
/* Interrupt handler for one queue-interrupt (QINT) line.  `param` is
 * the per-QINT context (otx2_qint) registered in
 * oxt2_nix_register_queue_irqs().  The handler reads the pending QINT
 * causes, then polls every RQ, CQ and SQ to find and log the specific
 * per-queue error conditions, acknowledges the QINT, and dumps queue
 * contexts.
 * NOTE(review): the statements mapping loop index `q` to `rq`/`cq`/`sq`
 * (and local declarations) are elided in this view of the file — the
 * loops below reference rq/cq/sq set by those elided lines.
 */
171 nix_lf_q_irq(void *param)
173 struct otx2_qint *qint = (struct otx2_qint *)param;
174 struct rte_eth_dev *eth_dev = qint->eth_dev;
175 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
176 uint8_t irq, qintx = qint->qintx;
/* Snapshot the pending QINT cause bits for this vector */
180 intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx));
184 otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d",
185 intr, qintx, dev->pf, dev->vf);
187 /* Handle RQ interrupts: drop/RED conditions */
188 for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
190 irq = nix_lf_rq_irq_get_and_clear(dev, rq);
192 if (irq & BIT_ULL(NIX_RQINT_DROP))
193 otx2_err("RQ=%d NIX_RQINT_DROP", rq);
195 if (irq & BIT_ULL(NIX_RQINT_RED))
196 otx2_err("RQ=%d NIX_RQINT_RED", rq);
199 /* Handle CQ interrupts: doorbell/write-full/CQE faults */
200 for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
202 irq = nix_lf_cq_irq_get_and_clear(dev, cq);
204 if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
205 otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);
207 if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
208 otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);
210 if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
211 otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
214 /* Handle SQ interrupts: each cause also dumps its debug register */
215 for (q = 0; q < eth_dev->data->nb_tx_queues; q++) {
217 irq = nix_lf_sq_irq_get_and_clear(dev, sq);
219 if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
220 otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
221 nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG);
223 if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
224 otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
225 nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG);
227 if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
228 otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
229 nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
231 if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
232 otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
/* NOTE(review): reads NIX_LF_SEND_ERR_DBG, same as the SEND_ERR case
 * above — confirm there is no dedicated SQB_ALLOC_FAIL debug register */
233 nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
237 /* Clear interrupt */
238 otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx));
/* Dump queue contexts for post-mortem analysis */
240 otx2_nix_queues_ctx_dump(eth_dev);
/* Register one QINT interrupt handler per configured queue group.
 * The number of QINT vectors is the max of RX and TX queues, capped
 * at the hardware limit dev->qints.  For each vector: reset the
 * counter, disable the enable bits, fill the per-vector context
 * (dev->qints_mem[q]), register the handler, then re-arm.
 * Returns the last rc from otx2_register_irq() (return statement
 * elided in this view).
 * NOTE(review): function name is spelled "oxt2_" (not "otx2_") —
 * latent typo, but renaming would break external callers.
 */
244 oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev)
246 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
247 struct rte_intr_handle *handle = &pci_dev->intr_handle;
248 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
249 int vec, q, sqs, rqs, qs, rc = 0;
251 /* Figure out max qintx required */
252 rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues);
253 sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues);
254 qs = RTE_MAX(rqs, sqs);
/* Remembered so the unregister path walks the same count */
256 dev->configured_qints = qs;
258 for (q = 0; q < qs; q++) {
259 vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
/* Reset the QINT counter before (re-)arming */
262 otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
264 /* Disable this QINT's enable bits while we set up (W1C) */
265 otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
/* Per-vector context handed to nix_lf_q_irq via otx2_register_irq */
267 dev->qints_mem[q].eth_dev = eth_dev;
268 dev->qints_mem[q].qintx = q;
270 /* Sync qints_mem update */
273 /* Register queue irq vector */
274 rc = otx2_register_irq(handle, nix_lf_q_irq,
275 &dev->qints_mem[q], vec);
/* Clear any stale count/pending state before enabling */
279 otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
280 otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
281 /* Enable QINT interrupt */
282 otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q));
/* Mirror of oxt2_nix_register_queue_irqs(): for every QINT vector
 * armed at registration time (dev->configured_qints), reset the
 * counter and pending bits, disable the enables, and unregister the
 * handler.
 * NOTE(review): same "oxt2_" naming typo as the register function —
 * kept for ABI/caller compatibility.
 */
289 oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev)
291 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
292 struct rte_intr_handle *handle = &pci_dev->intr_handle;
293 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
296 for (q = 0; q < dev->configured_qints; q++) {
/* Must match the vector computed at registration time */
297 vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
/* Reset counter and pending state */
300 otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
301 otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
303 /* Disable this QINT's enable bits (W1C) */
304 otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
306 /* Unregister queue irq vector */
307 otx2_unregister_irq(handle, nix_lf_q_irq,
308 &dev->qints_mem[q], vec);
/* Public entry point: register the LF-level interrupts (error + RAS)
 * for this device.  Fails early if the firmware did not assign a
 * valid NIX MSI-X vector offset.  Returns the OR of the two
 * registration rcs (error path and return elided in this view).
 */
313 otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
315 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* No valid MSI-X offset means we cannot place any vector */
318 if (dev->nix_msixoff == MSIX_VECTOR_INVALID) {
319 otx2_err("Invalid NIXLF MSIX vector offset vector: 0x%x",
324 /* Register lf err interrupt */
325 rc = nix_lf_register_err_irq(eth_dev);
326 /* Register RAS interrupt */
327 rc |= nix_lf_register_ras_irq(eth_dev);
/* Public entry point: tear down the LF-level interrupts registered by
 * otx2_nix_register_irqs() (error + RAS), in the same order.
 */
333 otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev)
335 nix_lf_unregister_err_irq(eth_dev);
336 nix_lf_unregister_ras_irq(eth_dev);