	.type = dpaa_portal_qman
};
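+/* Return the channel ID of the CAAM (SEC) direct-connect portal */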
+u16 dpaa_get_qm_channel_caam(void)
+{
+	return qm_channel_caam;
+}
+
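+/* Return the channel ID of the first QMan pool channel */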
+u16 dpaa_get_qm_channel_pool(void)
+{
+	return qm_channel_pool1;
+}
+
static int fsl_qman_portal_init(uint32_t index, int is_shared)
{
	struct qman_portal *portal;
	qm_dc_portal_pme = 3
};
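+/* Accessors for the run-time resolved QMan channel IDs; these replace
+ * direct use of the qm_channel_caam/qm_channel_pool1 variables, which
+ * are no longer exported.
+ */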
+__rte_internal
+u16 dpaa_get_qm_channel_caam(void);
+
+__rte_internal
+u16 dpaa_get_qm_channel_pool(void);
+
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
-/* for conversion from n of qm_channel */
+/* Convert an absolute pool channel ID to its SDQCR selector encoding */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
-	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+	return QM_SDQCR_CHANNELS_POOL(channel + 1 - dpaa_get_qm_channel_pool());
}
/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
	bman_query_free_buffers;
	bman_release;
	bman_thread_irq;
+	dpaa_get_qm_channel_caam;
+	dpaa_get_qm_channel_pool;
	dpaa_logtype_eventdev;
	dpaa_logtype_mempool;
	dpaa_logtype_pmd;
	netcfg_release;
	per_lcore_dpaa_io;
	per_lcore_held_bufs;
-	qm_channel_caam;
-	qm_channel_pool1;
	qman_alloc_cgrid_range;
	qman_alloc_pool_range;
	qman_clear_irq;
	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
-	fq_opts.fqd.dest.channel = qm_channel_caam;
+	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;
	fq_in->cb.ern = ern_sec_fq_handler;