struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+/* Table of outstanding dequeue storages, indexed by software portal (DPIO) */
+struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
static uint32_t io_space_count;
struct queue_storage_info_t {
struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
+ struct qbman_result *active_dqs; /* dq storage currently handed to hardware */
+ int active_dpio_id; /* DPIO (portal) on which that pull was issued */
+ int toggle; /* selects which dq_storage ring the next pull uses */
};
struct dpaa2_queue {
struct queue_storage_info_t *q_storage;
};
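The dq_storage rings together with toggle let a queue hand one storage ring to hardware while software still drains the other; active_dqs and active_dpio_id remember which ring is outstanding and on which portal the pull was issued. The sketch below is only an illustration of how those fields would typically be updated when a pull is started: issue_pull() is a hypothetical placeholder for the QBMan volatile-dequeue call, not part of this patch.

/* Illustrative sketch only: flip to the other storage ring, start a pull
 * into it, and record that hardware now owns it. issue_pull() is a
 * placeholder for the real QBMan volatile-dequeue command.
 */
static void example_start_pull(struct dpaa2_queue *q, int lcore_dpio_id)
{
	struct queue_storage_info_t *st = q->q_storage;
	struct qbman_result *dq_storage;

	/* alternate between the available dq_storage rings */
	st->toggle = (st->toggle + 1) % NUM_DQS_PER_QUEUE;
	dq_storage = st->dq_storage[st->toggle];

	issue_pull(dq_storage);			/* hypothetical: start the volatile dequeue */

	st->active_dqs = dq_storage;		/* hardware now owns this ring */
	st->active_dpio_id = lcore_dpio_id;	/* portal the pull was issued on */
}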
+/* Per-portal record of the dq storage currently owned by hardware;
+ * reserved[] pads each entry to 64 bytes.
+ */
+struct swp_active_dqs {
+ struct qbman_result *global_active_dqs;
+ uint64_t reserved[7];
+};
+
+#define NUM_MAX_SWP 64
+
+extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
/*! Global MCP list */
extern void *(*rte_mcp_ptr_list);
#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+/* Return 1 if a volatile dequeue is still outstanding on this portal. */
+static inline
+int check_swp_active_dqs(uint16_t dpio_index)
+{
+ if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
+ return 1;
+ return 0;
+}
+
+/* Forget the outstanding dequeue storage for this portal. */
+static inline
+void clear_swp_active_dqs(uint16_t dpio_index)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
+}
+
+/* Return the dequeue storage currently owned by hardware on this portal. */
+static inline
+struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
+{
+ return rte_global_active_dqs_list[dpio_index].global_active_dqs;
+}
+
+/* Record the dequeue storage just handed to hardware on this portal. */
+static inline
+void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
+}
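Taken together, the four helpers enforce one rule per software portal: at most one volatile dequeue may be outstanding at a time. A caller that wants to pull on a portal first checks for, waits on, and clears any pending result, then records its own storage after issuing the new command. The sketch below shows that sequence under stated assumptions; dq_storage_done() and start_pull() are hypothetical placeholders for the QBMan completion check and volatile-dequeue command, not functions added by this patch.

/* Illustrative sketch only: serialize volatile dequeues on one portal.
 * dq_storage_done() and start_pull() are placeholders for the real
 * QBMan completion-check and volatile-dequeue calls.
 */
static void example_pull_on_portal(uint16_t dpio_index,
				   struct qbman_result *new_dqs)
{
	if (check_swp_active_dqs(dpio_index)) {
		/* An earlier pull on this portal has not completed yet;
		 * wait for hardware to finish writing its storage.
		 */
		while (!dq_storage_done(get_swp_active_dqs(dpio_index)))
			;
		clear_swp_active_dqs(dpio_index);
	}

	start_pull(new_dqs);				/* issue the new volatile dequeue */
	set_swp_active_dqs(dpio_index, new_dqs);	/* record it for the next caller */
}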
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);