return -1;
}
+void
+qman_portal_uninhibit_isr(struct qman_portal *portal)
+{
+ qm_isr_uninhibit(&portal->p);
+}
+
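qman_portal_uninhibit_isr() re-arms a portal whose IRQ the in-kernel
handler is assumed to have inhibited before waking the process blocked on
the portal fd. A minimal consumer loop under that assumption, where
portal_fd and service_portal() are hypothetical stand-ins:

#include <stdint.h>
#include <unistd.h>

void service_portal(struct qman_portal *qp);	/* hypothetical */

static void portal_event_loop(struct qman_portal *qp, int portal_fd)
{
	uint32_t event;

	for (;;) {
		/* Blocks until the (still inhibited) portal IRQ fires. */
		if (read(portal_fd, &event, sizeof(event)) != sizeof(event))
			break;
		service_portal(qp);		/* drain DQRR, ack sources */
		qman_portal_uninhibit_isr(qp);	/* allow the next IRQ */
	}
}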
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
const struct qman_cgrs *cgrs)
{
dpaa_set_bits(bits, &p->irq_sources);
qm_isr_enable_write(&p->p, p->irq_sources);
+ return 0;
+}
+
+int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ bits &= QM_PIRQ_VISIBLE;
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ dpaa_set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
return 0;
}
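With this helper, arming interrupt mode on a queue that owns a dedicated
FQ portal reduces to enabling the dequeue-ready source; a sketch, assuming
qp was obtained from fsl_qman_fq_portal_create():

	/* Raise the portal IRQ whenever DQRR has entries to consume. */
	if (qman_fq_portal_irqsource_add(qp, QM_PIRQ_DQRI) != 0)
		return -1;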
return 0;
}
+int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ u32 ier;
+
+ /* Our interrupt handler only processes and clears status register bits
+ * that are in p->irq_sources. As we're trimming that mask, if one of
+ * them were to assert in the status register just before we remove it
+ * from the enable register, there would be an interrupt storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register, i.e. we clear them from ISR only once it's
+ * certain IER won't allow them to reassert.
+ */
+
+ bits &= QM_PIRQ_VISIBLE;
+ dpaa_clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data dependency, i.e. it protects against reordering of the IER
+ * read-back and this status clear.
+ */
+ qm_isr_status_clear(&p->p, ~ier);
+ return 0;
+}
+
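The same write / read-back / acknowledge sequence in its generic form,
assuming volatile MMIO accessors reg_read32()/reg_write32() (hypothetical
names):

	reg_write32(base + IER, ier & ~bits);	/* stop new assertions */
	ier = reg_read32(base + IER);		/* force the write out to h/w */
	reg_write32(base + ISR, ~ier);		/* ack only what IER can no
						 * longer let reassert */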
u16 qman_affine_channel(int cpu)
{
if (cpu < 0) {
out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
}
+/* Re-arm the FQ portal IRQ once the portal fd event has been consumed. */
+void qman_fq_portal_thread_irq(struct qman_portal *qp)
+{
+ qman_portal_uninhibit_isr(qp);
+}
+
struct qman_portal *fsl_qman_fq_portal_create(int *fd)
{
struct qman_portal *portal = NULL;
struct qman_portal *qman_alloc_global_portal(struct qm_portal_config *q_pcfg);
int qman_free_global_portal(struct qman_portal *portal);
+void qman_portal_uninhibit_isr(struct qman_portal *portal);
+
struct qm_portal_config *qm_get_unused_portal(void);
struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
*/
void qman_thread_irq(void);
void bman_thread_irq(void);
+void qman_fq_portal_thread_irq(struct qman_portal *qp);
void qman_clear_irq(void);
global:
fsl_qman_fq_portal_create;
+ qman_fq_portal_irqsource_add;
+ qman_fq_portal_irqsource_remove;
+ qman_fq_portal_thread_irq;
} DPDK_19.05;
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017-2019 NXP
*
*/
/* System headers */
return ret;
}
+static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
+
+ /* Only queues with a dedicated FQ portal (push mode) can interrupt. */
+ if (!rxq->is_static)
+ return -EINVAL;
+
+ /* Arm the dequeue-ready (DQRI) source on that portal. */
+ return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
+}
+
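dpaa_dev_queue_intr_enable() backs rte_eth_dev_rx_intr_enable(), and only
queues mapped to a dedicated portal (rxq->is_static) can interrupt, so an
application should treat -EINVAL as a cue to keep polling. A hedged
sketch, where poll_mode is a hypothetical application flag:

	if (rte_eth_dev_rx_intr_enable(port_id, queue_id) != 0)
		poll_mode = true;	/* no dedicated portal; keep polling */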
+static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
+ uint32_t val;
+ ssize_t bytes_read;
+
+ if (!rxq->is_static)
+ return -EINVAL;
+
+ /* Mask every interrupt source and ack whatever is already pending. */
+ qman_fq_portal_irqsource_remove(rxq->qp, ~0);
+
+ /* Drain the portal fd so a stale event cannot wake the next poll. */
+ bytes_read = read(rxq->q_fd, &val, sizeof(val));
+ if (bytes_read != sizeof(val))
+ DPAA_PMD_ERR("irq read error");
+
+ /* Re-arm (uninhibit) the portal IRQ. */
+ qman_fq_portal_thread_irq(rxq->qp);
+
+ return 0;
+}
+
static struct eth_dev_ops dpaa_devops = {
.dev_configure = dpaa_eth_dev_configure,
.dev_start = dpaa_eth_dev_start,
.mac_addr_set = dpaa_dev_set_mac_addr,
.fw_version_get = dpaa_fw_version_get,
+
+ .rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
+ .rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
};
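With both ops registered, the stock ethdev Rx-interrupt flow (as used by
examples/l3fwd-power) applies to DPAA. A condensed sketch, assuming
dev_conf.intr_conf.rxq was set before rte_eth_dev_configure() and
timeout_ms is supplied by the application:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

	struct rte_epoll_event ev;

	/* Register the queue IRQ with this lcore's epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	if (rte_eth_dev_rx_intr_enable(port_id, queue_id) == 0) {
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, timeout_ms);
		rte_eth_dev_rx_intr_disable(port_id, queue_id);
	}
	/* Resume rte_eth_rx_burst() polling here. */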
static bool