/* Portal driver */
/*****************/
-static __thread int fd = -1;
+static __thread int bmfd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
.type = dpaa_portal_bman
pcfg.index = map.index;
bman_depletion_fill(&pcfg.mask);
- fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
- if (fd == -1) {
+ bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (bmfd == -1) {
pr_err("BMan irq init failed");
process_portal_unmap(&map.addr);
return -EBUSY;
}
/* Use the IRQ FD as a unique IRQ number */
- pcfg.irq = fd;
+ pcfg.irq = bmfd;
portal = bman_create_affine_portal(&pcfg);
if (!portal) {
/* Set the IRQ number */
irq_map.type = dpaa_portal_bman;
irq_map.portal_cinh = map.addr.cinh;
- process_portal_irq_map(fd, &irq_map);
+ process_portal_irq_map(bmfd, &irq_map);
return 0;
}
__maybe_unused const struct bm_portal_config *cfg;
int ret;
- process_portal_irq_unmap(fd);
+ process_portal_irq_unmap(bmfd);
cfg = bman_destroy_affine_portal();
DPAA_BUG_ON(cfg != &pcfg);
return ret;
}
+int bman_thread_fd(void)
+{
+ return bmfd;
+}
+
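The accessor above returns an ordinary file descriptor, so an application thread can block on it and then call bman_thread_irq() to post-process and re-enable the interrupt line. A minimal sketch of that wiring, assuming an epoll loop (the plumbing is illustrative, not part of this patch):

	#include <sys/epoll.h>
	#include <unistd.h>

	static void wait_bman_portal_irq(void)
	{
		struct epoll_event ev = { .events = EPOLLIN };
		int efd = epoll_create1(0);

		/* must run on the thread that called bman_thread_init() */
		ev.data.fd = bman_thread_fd();
		epoll_ctl(efd, EPOLL_CTL_ADD, ev.data.fd, &ev);
		/* the kernel IRQ handler disables the line before waking us */
		epoll_wait(efd, &ev, 1, -1);
		/* post-process and re-enable the interrupt line */
		bman_thread_irq();
		close(efd);
	}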
int bman_thread_init(void)
{
/* Convert from contiguous/virtual cpu numbering to real cpu when
return limit;
}
+int qman_irqsource_add(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+
+	bits &= QM_PIRQ_VISIBLE;
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ dpaa_set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+
+ return 0;
+}
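Typical usage, for reference: a consumer that wants dequeue-driven wakeups arms the DQRR source on its affine portal before sleeping on the portal fd. QM_PIRQ_DQRI is the existing DQRR source bit; the call site below is a sketch:

	/* sketch: make DQRR-not-empty interrupt-driven on this portal */
	qman_irqsource_add(QM_PIRQ_DQRI);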
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 ier;
+
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+
+ bits &= QM_PIRQ_VISIBLE;
+ dpaa_clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_isr_status_clear(&p->p, ~ier);
+ return 0;
+}
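The reverse transition is symmetric; once it returns, the trimmed sources must be serviced through the poll-mode APIs (e.g. qman_portal_dequeue()) instead. A sketch:

	/* sketch: hand DQRR processing back to the poll-mode path */
	qman_irqsource_remove(QM_PIRQ_DQRI);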
+
u16 qman_affine_channel(int cpu)
{
if (cpu < 0) {
return rx_number;
}
+void qman_clear_irq(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ qm_isr_status_clear(&p->p, clear);
+}
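Putting the pieces together, an interrupt-mode consumer waits on the portal fd, acknowledges the portal-level sources, dequeues, and re-enables the kernel IRQ line. In this sketch, wait_on_fd() and MAX_BURST are placeholders rather than APIs from this patch:

	struct rte_event events[MAX_BURST];
	void *bufs[MAX_BURST];
	u32 nb;

	for (;;) {
		wait_on_fd(qman_thread_fd());	/* e.g. the epoll wiring above */
		qman_clear_irq();		/* ack portal-level IRQ sources */
		nb = qman_portal_dequeue(events, MAX_BURST, bufs);
		/* ... hand the nb dequeued events to the application ... */
		qman_thread_irq();		/* re-enable the kernel IRQ line */
	}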
+
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs)
{
return ret;
}
+int qman_thread_fd(void)
+{
+ return qmfd;
+}
+
int qman_thread_init(void)
{
/* Convert from contiguous/virtual cpu numbering to real cpu when
* rather than breaking that encapsulation I am simply hard-coding the
* offset to the inhibit register here.
*/
- out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
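+	/* Note: 0x36C0 is the interrupt-inhibit (IIR) register offset in
+	 * the cache-inhibited region of the ARM portal layout; 0xe0c was
+	 * the legacy layout's offset.
+	 */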
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
}
struct qman_portal *fsl_qman_portal_create(void)
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs);
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
+
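Given the -EINVAL case documented above, callers on a potentially shared portal should check the return value and keep a polling fallback. A sketch:

	int ret = qman_irqsource_add(QM_PIRQ_DQRI);

	if (ret < 0) {
		/* shared portal: fall back to poll-mode dequeue */
	}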
/**
 * qman_affine_channel - return the channel ID of a portal
* @cpu: the cpu whose affine portal is the subject of the query
int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
int bman_free_raw_portal(struct dpaa_raw_portal *portal);
+/* Obtain thread-local UIO file-descriptors */
+int qman_thread_fd(void);
+int bman_thread_fd(void);
+
/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
* line before notifying us, and this post-processing re-enables it once
* processing is complete. As such, it is essential to call this before going
void qman_thread_irq(void);
void bman_thread_irq(void);
+void qman_clear_irq(void);
+
/* Global setup */
int qman_global_init(void);
int bman_global_init(void);
DPDK_18.08 {
global:
-
fman_if_get_sg_enable;
fman_if_set_sg;
of_get_mac_address;
local: *;
} DPDK_18.02;
+
+DPDK_18.11 {
+	global:
+	bman_thread_fd;
+	bman_thread_irq;
+	qman_clear_irq;
+	qman_irqsource_add;
+	qman_irqsource_remove;
+	qman_thread_fd;
+	qman_thread_irq;
+
+	local: *;
+} DPDK_18.08;