/* Many to one reduction */
aq->cq.qint_idx = qid % dev->qints;
+ /* Map CQ0 [RQ0] to CINT0, CQ1 to CINT1, and so on, up to a max of 64 IRQs */
+ aq->cq.cint_idx = qid;
if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
uint16_t min_rx_drop;
otx2_nix_vlan_fini(eth_dev);
otx2_flow_free_all_resources(dev);
oxt2_nix_unregister_queue_irqs(eth_dev);
+ if (eth_dev->data->dev_conf.intr_conf.rxq)
+ otx2_nix_unregister_cq_irqs(eth_dev);
nix_set_nop_rxtx_function(eth_dev);
rc = nix_store_queue_cfg_and_then_release(eth_dev);
if (rc)
goto free_nix_lf;
}
+ /* Register cq IRQs */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (eth_dev->data->nb_rx_queues > dev->cints) {
+ otx2_err("Rx interrupt cannot be enabled, rxq > %d",
+ dev->cints);
+ goto free_nix_lf;
+ }
+ /* The Rx interrupt feature cannot work with vector mode because
+ * vector mode processes packets only once at least 4 have been
+ * received, while a CQ interrupt is generated even for a single
+ * packet in the CQ.
+ */
+ dev->scalar_ena = true;
+
+ rc = otx2_nix_register_cq_irqs(eth_dev);
+ if (rc) {
+ otx2_err("Failed to register CQ interrupts rc=%d", rc);
+ goto free_nix_lf;
+ }
+ }
+
/* Configure loop back mode */
rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
if (rc) {
.vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
.vlan_tpid_set = otx2_nix_vlan_tpid_set,
.vlan_pvid_set = otx2_nix_vlan_pvid_set,
+ .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
+ .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
};
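
With these ops in place, an application drives the feature through the
generic ethdev Rx-interrupt API. A minimal sketch of the consumer side
follows; it assumes the port was configured with intr_conf.rxq = 1 and
started, and the port/queue ids and epoll usage are illustrative, not
part of this patch:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Sleep until at least one packet lands in the CQ of (port, queue),
 * instead of busy polling.
 */
static int
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;
	int rc;

	/* Bind the queue's interrupt eventfd to this thread's epoll fd */
	rc = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
				       RTE_EPOLL_PER_THREAD,
				       RTE_INTR_EVENT_ADD, NULL);
	if (rc)
		return rc;

	/* Arm the CQ interrupt (lands in otx2_nix_rx_queue_intr_enable) */
	rc = rte_eth_dev_rx_intr_enable(port_id, queue_id);
	if (rc)
		return rc;

	/* Block until the interrupt fires (no timeout) */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);

	/* Disarm before returning to polled Rx */
	return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}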
static inline int
/* Unregister queue irqs */
oxt2_nix_unregister_queue_irqs(eth_dev);
+ /* Unregister cq irqs */
+ if (eth_dev->data->dev_conf.intr_conf.rxq)
+ otx2_nix_unregister_cq_irqs(eth_dev);
+
rc = nix_lf_free(dev);
if (rc)
otx2_err("Failed to free nix lf, rc=%d", rc);
#define OP_ERR BIT_ULL(CQ_OP_STAT_OP_ERR)
#define CQ_ERR BIT_ULL(CQ_OP_STAT_CQ_ERR)
+/* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
+#define CQ_CQE_THRESH_DEFAULT   0x1ULL
+/* ~1 usec, i.e. (0xA * 100 nsec) */
+#define CQ_TIMER_THRESH_DEFAULT 0xAULL
+#define CQ_TIMER_THRESH_MAX     255
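
For clarity, the write that programs these thresholds (see
otx2_nix_register_cq_irqs() below) packs all three fields into the one
64-bit NIX_LF_CINTX_WAIT() value. A small illustrative helper, with the
bit positions taken from that write (the helper name and field layout
description are assumptions, not part of this patch):

#include <stdint.h>

/* Pack the CQ interrupt coalescing thresholds: the CQE threshold at
 * bits 0 and 32, the timer threshold (100 nsec units) at bit 48.
 */
static inline uint64_t
nix_cq_wait_val(uint64_t cqe_thresh, uint64_t timer_thresh)
{
	return cqe_thresh | (cqe_thresh << 32) | (timer_thresh << 48);
}

/* With the defaults: nix_cq_wait_val(0x1, 0xA) => IRQ after a single
 * CQE, with a ~1 usec timer (0xA * 100 nsec).
 */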
+
#define NIX_RSS_OFFLOAD (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
ETH_RSS_TCP | ETH_RSS_SCTP | \
ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD)
uint16_t qints;
uint8_t configured;
uint8_t configured_qints;
+ uint8_t configured_cints;
uint8_t configured_nb_rx_qs;
uint8_t configured_nb_tx_qs;
uint16_t nix_msixoff;
uint64_t rx_offload_capa;
uint64_t tx_offload_capa;
struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT];
+ struct otx2_qint cints_mem[RTE_MAX_QUEUES_PER_PORT];
uint16_t txschq[NIX_TXSCH_LVL_CNT];
uint16_t txschq_contig[NIX_TXSCH_LVL_CNT];
uint16_t txschq_index[NIX_TXSCH_LVL_CNT];
/* IRQ */
int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev);
int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev);
+int otx2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev);
void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev);
void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev);
+void otx2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev);
+
+int otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id);
+int otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id);
/* Debug */
int otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data);
#include <inttypes.h>
#include <rte_bus_pci.h>
+#include <rte_malloc.h>
#include "otx2_ethdev.h"
(int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
}
+static void
+nix_lf_cq_irq(void *param)
+{
+ struct otx2_qint *cint = (struct otx2_qint *)param;
+ struct rte_eth_dev *eth_dev = cint->eth_dev;
+ struct otx2_eth_dev *dev;
+
+ dev = otx2_eth_pmd_priv(eth_dev);
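+
+ /* The application wakeup is delivered via the interrupt eventfd
+ * through rte_epoll_wait(); this handler only has to acknowledge
+ * the hardware interrupt so it can fire again.
+ */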
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_INT(cint->qintx));
+}
+
static void
nix_lf_q_irq(void *param)
{
}
}
+int
+otx2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc = 0;
+ uint8_t vec, q;
+
+ dev->configured_cints = RTE_MIN(dev->cints,
+ eth_dev->data->nb_rx_queues);
+
+ for (q = 0; q < dev->configured_cints; q++) {
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;
+
+ /* Clear CINT CNT */
+ otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));
+
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));
+
+ dev->cints_mem[q].eth_dev = eth_dev;
+ dev->cints_mem[q].qintx = q;
+
+ /* Sync cints_mem update */
+ rte_smp_wmb();
+
+ /* Register queue irq vector */
+ rc = otx2_register_irq(handle, nix_lf_cq_irq,
+ &dev->cints_mem[q], vec);
+ if (rc) {
+ otx2_err("Fail to register CQ irq, rc=%d", rc);
+ return rc;
+ }
+
+ if (!handle->intr_vec) {
+ handle->intr_vec = rte_zmalloc("intr_vec",
+ dev->configured_cints *
+ sizeof(int), 0);
+ if (!handle->intr_vec) {
+ otx2_err("Failed to allocate %d rx intr_vec",
+ dev->configured_cints);
+ return -ENOMEM;
+ }
+ }
+ /* VFIO vector zero is reserved for the misc interrupt, so
+ * offset the vector accordingly. (b13bfab4cd)
+ */
+ handle->intr_vec[q] = RTE_INTR_VEC_RXTX_OFFSET + vec;
+
+ /* Configure CQE interrupt coalescing parameters */
+ otx2_write64((CQ_CQE_THRESH_DEFAULT |
+ (CQ_CQE_THRESH_DEFAULT << 32) |
+ (CQ_TIMER_THRESH_DEFAULT << 48)),
+ dev->base + NIX_LF_CINTX_WAIT(q));
+
+ /* Keep the CQ interrupt disabled here; the Rx interrupt
+ * feature is enabled/disabled on demand via the
+ * rx_queue_intr_enable/disable ops.
+ */
+ }
+
+ return rc;
+}
+
+void
+otx2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int vec, q;
+
+ for (q = 0; q < dev->configured_cints; q++) {
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;
+
+ /* Clear CINT CNT */
+ otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));
+
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));
+
+ /* Unregister queue irq vector */
+ otx2_unregister_irq(handle, nix_lf_cq_irq,
+ &dev->cints_mem[q], vec);
+ }
+}
+
int
otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
{
nix_lf_unregister_err_irq(eth_dev);
nix_lf_unregister_ras_irq(eth_dev);
}
+
+int
+otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* Enable CINT interrupt */
+ otx2_write64(BIT_ULL(0), dev->base +
+ NIX_LF_CINTX_ENA_W1S(rx_queue_id));
+
+ return 0;
+}
+
+int
+otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* Clear and disable CINT interrupt */
+ otx2_write64(BIT_ULL(0), dev->base +
+ NIX_LF_CINTX_ENA_W1C(rx_queue_id));
+
+ return 0;
+}