= TAILQ_HEAD_INITIALIZER(qdma_queue_list);
/* QDMA Virtual Queues */
-struct qdma_virt_queue *qdma_vqs;
+static struct qdma_virt_queue *qdma_vqs;
/* QDMA per core data */
-struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
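/*
 * Giving these file-scope tables internal linkage keeps them out of the
 * PMD's exported symbol table, so other compilation units must go through
 * the public accessors instead. A minimal sketch of that accessor path
 * (example_query_attr() is a hypothetical caller):
 */
static void example_query_attr(void)
{
	struct rte_qdma_attr attr;

	rte_qdma_attr_get(&attr);	/* fills attr.num_hw_queues */
}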
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
}
}
-int __rte_experimental
+int
rte_qdma_init(void)
{
DPAA2_QDMA_FUNC_TRACE();
return 0;
}
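/*
 * With the __rte_experimental tag dropped, applications no longer need
 * to define ALLOW_EXPERIMENTAL_API to call this control path. A hedged
 * bring-up sketch using only functions from this patch; the helper name
 * example_bringup() is hypothetical and the config fields are elided:
 */
static int example_bringup(void)
{
	struct rte_qdma_config cfg = {0};	/* populate per driver docs */

	if (rte_qdma_init() != 0)
		return -1;
	if (rte_qdma_configure(&cfg) != 0)
		return -1;
	return rte_qdma_start();	/* 0 on success; VQs can follow */
}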
-void __rte_experimental
+void
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
DPAA2_QDMA_FUNC_TRACE();
qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}
-int __rte_experimental
+int
rte_qdma_reset(void)
{
struct qdma_hw_queue *queue;
return 0;
}
-int __rte_experimental
+int
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
int ret;
return 0;
}
-int __rte_experimental
+int
rte_qdma_start(void)
{
DPAA2_QDMA_FUNC_TRACE();
return 0;
}
-int __rte_experimental
+int
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
char ring_name[32];
qdma_vqs[i].exclusive_hw_queue = 1;
} else {
/* Allocate a Ring for Virtual Queue in VQ mode */
- sprintf(ring_name, "status ring %d", i);
+ snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
qdma_vqs[i].status_ring = rte_ring_create(ring_name,
qdma_dev.fle_pool_count, rte_socket_id(), 0);
if (!qdma_vqs[i].status_ring) {
return job;
}
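/*
 * The snprintf() conversion above bounds the write to the 32-byte
 * ring_name buffer and always NUL-terminates, which sprintf() did not
 * guarantee. A standalone sketch of the pattern, including the
 * truncation check (example_format_name() is a hypothetical helper):
 */
static int example_format_name(int i)
{
	char name[32];
	int n = snprintf(name, sizeof(name), "status ring %d", i);

	/* n < 0 is a formatting error; n >= sizeof(name) means truncation */
	if (n < 0 || n >= (int)sizeof(name))
		return -1;
	return 0;
}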
-void __rte_experimental
+void
rte_qdma_vq_stats(uint16_t vq_id,
struct rte_qdma_vq_stats *vq_status)
{
}
}
-int __rte_experimental
+int
rte_qdma_vq_destroy(uint16_t vq_id)
{
struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
return 0;
}
-void __rte_experimental
+void
rte_qdma_stop(void)
{
DPAA2_QDMA_FUNC_TRACE();
qdma_dev.state = 0;
}
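/*
 * Teardown mirrors bring-up: quiesce the device before releasing it.
 * A minimal shutdown sketch using calls from this file, with
 * rte_qdma_destroy() defined just below (example_teardown() and vq_id
 * are illustrative):
 */
static void example_teardown(uint16_t vq_id)
{
	rte_qdma_vq_destroy(vq_id);	/* release virtual queues first */
	rte_qdma_stop();		/* clears qdma_dev.state */
	rte_qdma_destroy();		/* then free device resources */
}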
-void __rte_experimental
+void
rte_qdma_destroy(void)
{
DPAA2_QDMA_FUNC_TRACE();