net/octeontx2: handle queue specific error interrupts
Author:     Jerin Jacob <jerinj@marvell.com>
AuthorDate: Wed, 29 May 2019 05:00:14 +0000 (10:30 +0530)
Commit:     Ferruh Yigit <ferruh.yigit@intel.com>
CommitDate: Thu, 4 Jul 2019 23:52:01 +0000 (01:52 +0200)
Handle queue-specific error interrupts: register one MSI-X vector per NIX
queue interrupt (QINT) line when the device is configured, decode and log
RQ, CQ and SQ error causes in the handler, and unregister the vectors
again on reconfigure and device uninit.
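
As a worked example (the queue counts here are hypothetical; the formulas
are the ones used by oxt2_nix_register_queue_irqs() and nix_lf_q_irq()):
with dev->qints = 32, 8 Rx queues and 4 Tx queues,

    rqs = min(32, 8) = 8
    sqs = min(32, 4) = 4
    qs  = max(8, 4)  = 8

so 8 QINT vectors are registered, at MSI-X vectors
nix_msixoff + NIX_LF_INT_VEC_QINT_START + 0..7. In the handler each queue
index is folded with q % dev->qints, so RQ 5 is reported under QINT
group 5, and with more queues than QINT lines (say 40 Rx queues) RQ 35
would fall under group 3.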

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
doc/guides/nics/octeontx2.rst
drivers/net/octeontx2/otx2_ethdev.c
drivers/net/octeontx2/otx2_ethdev.h
drivers/net/octeontx2/otx2_ethdev_irq.c

diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
index e3f4c2c..50e8259 100644
--- a/doc/guides/nics/octeontx2.rst
+++ b/doc/guides/nics/octeontx2.rst
@@ -18,6 +18,7 @@ Features of the OCTEON TX2 Ethdev PMD are:
 
 - SR-IOV VF
 - Lock-free Tx queue
+- Debug utilities - error interrupt support
 
 Prerequisites
 -------------
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 65d72a4..045855c 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -163,8 +163,10 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
        }
 
        /* Free the resources allocated from the previous configure */
-       if (dev->configured == 1)
+       if (dev->configured == 1) {
+               oxt2_nix_unregister_queue_irqs(eth_dev);
                nix_lf_free(dev);
+       }
 
        if (otx2_dev_is_A0(dev) &&
            (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
@@ -189,6 +191,13 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
                goto fail;
        }
 
+       /* Register queue IRQs */
+       rc = oxt2_nix_register_queue_irqs(eth_dev);
+       if (rc) {
+               otx2_err("Failed to register queue interrupts rc=%d", rc);
+               goto free_nix_lf;
+       }
+
        /* Update the mac address */
        ea = eth_dev->data->mac_addrs;
        memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
@@ -210,6 +219,8 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
        dev->configured_nb_tx_qs = data->nb_tx_queues;
        return 0;
 
+free_nix_lf:
+       rc = nix_lf_free(dev);
 fail:
        return rc;
 }
@@ -413,6 +424,9 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       /* Unregister queue irqs */
+       oxt2_nix_unregister_queue_irqs(eth_dev);
+
        rc = nix_lf_free(dev);
        if (rc)
                otx2_err("Failed to free nix lf, rc=%d", rc);
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index c1528e2..d9cdd33 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
        DEV_RX_OFFLOAD_QINQ_STRIP | \
        DEV_RX_OFFLOAD_TIMESTAMP)
 
+struct otx2_qint {
+       struct rte_eth_dev *eth_dev;
+       uint8_t qintx;
+};
+
 struct otx2_rss_info {
        uint16_t rss_size;
        uint8_t rss_grps;
@@ -134,6 +139,7 @@ struct otx2_eth_dev {
        uint16_t cints;
        uint16_t qints;
        uint8_t configured;
+       uint8_t configured_qints;
        uint8_t configured_nb_rx_qs;
        uint8_t configured_nb_tx_qs;
        uint16_t nix_msixoff;
@@ -147,6 +153,7 @@ struct otx2_eth_dev {
        uint64_t tx_offloads;
        uint64_t rx_offload_capa;
        uint64_t tx_offload_capa;
+       struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT];
        struct otx2_rss_info rss_info;
        struct otx2_npc_flow_info npc_flow;
 } __rte_cache_aligned;
@@ -163,7 +170,9 @@ void otx2_nix_info_get(struct rte_eth_dev *eth_dev,
 
 /* IRQ */
 int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev);
+int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev);
 void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev);
+void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev);
 
 /* CGX */
 int otx2_cgx_rxtx_start(struct otx2_eth_dev *dev);
diff --git a/drivers/net/octeontx2/otx2_ethdev_irq.c b/drivers/net/octeontx2/otx2_ethdev_irq.c
index 33fed93..476c7ea 100644
--- a/drivers/net/octeontx2/otx2_ethdev_irq.c
+++ b/drivers/net/octeontx2/otx2_ethdev_irq.c
@@ -112,6 +112,197 @@ nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev)
        otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec);
 }
 
+static inline uint8_t
+nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q,
+                          uint32_t off, uint64_t mask)
+{
+       uint64_t reg, wdata;
+       uint8_t qint;
+
+       wdata = (uint64_t)q << 44;
+       reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off));
+
+       if (reg & BIT_ULL(42) /* OP_ERR */) {
+               otx2_err("Failed execute irq get off=0x%x", off);
+               return 0;
+       }
+
+       qint = reg & 0xff;
+       wdata &= mask;
+       otx2_write64(wdata, dev->base + off);
+
+       return qint;
+}
+
+static inline uint8_t
+nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq)
+{
+       return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00);
+}
+
+static inline uint8_t
+nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq)
+{
+       return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00);
+}
+
+static inline uint8_t
+nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq)
+{
+       return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
+}
+
+static inline void
+nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off)
+{
+       uint64_t reg;
+
+       reg = otx2_read64(dev->base + off);
+       if (reg & BIT_ULL(44))
+               otx2_err("SQ=%d err_code=0x%x",
+                        (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
+}
+
+static void
+nix_lf_q_irq(void *param)
+{
+       struct otx2_qint *qint = (struct otx2_qint *)param;
+       struct rte_eth_dev *eth_dev = qint->eth_dev;
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       uint8_t irq, qintx = qint->qintx;
+       int q, cq, rq, sq;
+       uint64_t intr;
+
+       intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx));
+       if (intr == 0)
+               return;
+
+       otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d",
+                intr, qintx, dev->pf, dev->vf);
+
+       /* Handle RQ interrupts */
+       for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
+               rq = q % dev->qints;
+               irq = nix_lf_rq_irq_get_and_clear(dev, rq);
+
+               if (irq & BIT_ULL(NIX_RQINT_DROP))
+                       otx2_err("RQ=%d NIX_RQINT_DROP", rq);
+
+               if (irq & BIT_ULL(NIX_RQINT_RED))
+                       otx2_err("RQ=%d NIX_RQINT_RED", rq);
+       }
+
+       /* Handle CQ interrupts */
+       for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
+               cq = q % dev->qints;
+               irq = nix_lf_cq_irq_get_and_clear(dev, cq);
+
+               if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+                       otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);
+
+               if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
+                       otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);
+
+               if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+                       otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
+       }
+
+       /* Handle SQ interrupts */
+       for (q = 0; q < eth_dev->data->nb_tx_queues; q++) {
+               sq = q % dev->qints;
+               irq = nix_lf_sq_irq_get_and_clear(dev, sq);
+
+               if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+                       otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
+                       nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG);
+               }
+               if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+                       otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
+                       nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG);
+               }
+               if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+                       otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
+                       nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
+               }
+               if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
+                       otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
+                       nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
+               }
+       }
+
+       /* Clear interrupt */
+       otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx));
+}
+
+int
+oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev)
+{
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+       struct rte_intr_handle *handle = &pci_dev->intr_handle;
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       int vec, q, sqs, rqs, qs, rc = 0;
+
+       /* Figure out max qintx required */
+       rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues);
+       sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues);
+       qs  = RTE_MAX(rqs, sqs);
+
+       dev->configured_qints = qs;
+
+       for (q = 0; q < qs; q++) {
+               vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
+
+               /* Clear QINT CNT */
+               otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
+
+               /* Clear interrupt */
+               otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
+
+               dev->qints_mem[q].eth_dev = eth_dev;
+               dev->qints_mem[q].qintx = q;
+
+               /* Sync qints_mem update */
+               rte_smp_wmb();
+
+               /* Register queue irq vector */
+               rc = otx2_register_irq(handle, nix_lf_q_irq,
+                                      &dev->qints_mem[q], vec);
+               if (rc)
+                       break;
+
+               otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
+               otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
+               /* Enable QINT interrupt */
+               otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q));
+       }
+
+       return rc;
+}
+
+void
+oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev)
+{
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+       struct rte_intr_handle *handle = &pci_dev->intr_handle;
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       int vec, q;
+
+       for (q = 0; q < dev->configured_qints; q++) {
+               vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
+
+               /* Clear QINT CNT */
+               otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
+               otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
+
+               /* Clear interrupt */
+               otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
+
+               /* Unregister queue irq vector */
+               otx2_unregister_irq(handle, nix_lf_q_irq,
+                                   &dev->qints_mem[q], vec);
+       }
+}
+
 int
 otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
 {