mempool/dpaa2: support saving context of buffer pool
author     Shreyansh Jain <shreyansh.jain@nxp.com>
           Fri, 11 Jan 2019 12:25:02 +0000 (12:25 +0000)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Mon, 14 Jan 2019 16:44:29 +0000 (17:44 +0100)
The initial design kept the buffer pool context per process, with a
global static array storing the bpids. In the case of secondary
processes, however, this prevented the I/O threads from translating
the bpid found in Rx'd packets.

This patch moves the array into rte_malloc'd memory, which lives in
hugepage segments shared across processes. If an Rx thread does not
hold a valid reference to the array, the reference is rebuilt from
the handle available in the dpaa2_queue.
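
A minimal sketch of the resulting pattern, using hypothetical stand-in
names rather than the actual driver structures: the primary process
allocates the table from the DPDK heap (hugepage memory that secondary
processes also map), each queue keeps a handle to it, and a secondary
process restores its process-local global pointer from that handle on
first use:

#include <errno.h>
#include <rte_branch_prediction.h>
#include <rte_eal.h>
#include <rte_malloc.h>

struct bp_info { int bpid; };                /* stand-in for struct dpaa2_bp_info */
struct queue { struct bp_info *bp_array; };  /* stand-in for struct dpaa2_queue */

static struct bp_info *bpid_table;           /* per-process global pointer */

/* Primary: allocate the table from the rte heap and stash a handle
 * to it in the (shared) queue structure at setup time. */
static int
setup_queue(struct queue *q, unsigned int max_bpid)
{
	if (bpid_table == NULL) {
		bpid_table = rte_zmalloc(NULL,
				sizeof(*bpid_table) * max_bpid, 0);
		if (bpid_table == NULL)
			return -ENOMEM;
	}
	q->bp_array = bpid_table;
	return 0;
}

/* Rx path: in a secondary process the global starts out NULL, so
 * rebuild it lazily from the handle stored in the queue. */
static void
rx_burst(struct queue *q)
{
	if (unlikely(bpid_table == NULL &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		bpid_table = q->bp_array;
	/* ... translate bpids in received frames via bpid_table ... */
}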

Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
drivers/mempool/dpaa2/dpaa2_hw_mempool.c
drivers/mempool/dpaa2/dpaa2_hw_mempool.h
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/dpaa2/dpaa2_rxtx.c

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index efbeebe..20c606d 100644
@@ -141,6 +141,7 @@ struct dpaa2_queue {
        };
        struct rte_event ev;
        dpaa2_queue_cb_dqrr_t *cb;
+       struct dpaa2_bp_info *bp_array;
 };
 
 struct swp_active_dqs {
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 790cded..335eae4 100644
@@ -32,7 +32,7 @@
 
 #include <dpaax_iova_table.h>
 
-struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+struct dpaa2_bp_info *rte_dpaa2_bpid_info;
 static struct dpaa2_bp_list *h_bp_list;
 
 /* Dynamic logging identified for mempool */
@@ -50,6 +50,16 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
 
        avail_dpbp = dpaa2_alloc_dpbp_dev();
 
+       if (rte_dpaa2_bpid_info == NULL) {
+               rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+                                     sizeof(struct dpaa2_bp_info) * MAX_BPID,
+                                     RTE_CACHE_LINE_SIZE);
+               if (rte_dpaa2_bpid_info == NULL)
+                       return -ENOMEM;
+               memset(rte_dpaa2_bpid_info, 0,
+                      sizeof(struct dpaa2_bp_info) * MAX_BPID);
+       }
+
        if (!avail_dpbp) {
                DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
                return -ENOENT;
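
As a side note on the allocation above: the rte_malloc() plus memset()
sequence could equally be written with rte_zmalloc(), which returns
zeroed memory from the same shared heap; a minimal equivalent sketch:

	rte_dpaa2_bpid_info = rte_zmalloc(NULL,
			sizeof(struct dpaa2_bp_info) * MAX_BPID,
			RTE_CACHE_LINE_SIZE);
	if (rte_dpaa2_bpid_info == NULL)
		return -ENOMEM;

Either way, the key design point is that the table no longer lives in
per-process static storage but in memory a secondary process can reach.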
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
index 4d34687..9369461 100644
@@ -59,7 +59,7 @@ struct dpaa2_bp_info {
 #define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
 #define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
 
-extern struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+extern struct dpaa2_bp_info *rte_dpaa2_bpid_info;
 
 int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
                       void **obj_table, unsigned int count);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 861fbcd..3a20158 100644
@@ -485,6 +485,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
+       dpaa2_q->bp_array = rte_dpaa2_bpid_info;
 
        /*Get the flow id from given VQ id*/
        flow_id = rx_queue_id % priv->nb_rx_queues;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 816ea00..6e2e8ab 100644
@@ -518,6 +518,11 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                        return 0;
                }
        }
+
+       if (unlikely(!rte_dpaa2_bpid_info &&
+                    rte_eal_process_type() == RTE_PROC_SECONDARY))
+               rte_dpaa2_bpid_info = dpaa2_q->bp_array;
+
        swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
        pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
        if (unlikely(!q_storage->active_dqs)) {
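
For context on why the Rx path needs this pointer at all: the
frame-to-mbuf conversion later in this file indexes the table by the
bpid carried in each hardware frame descriptor. A hedged sketch of
that lookup, assuming a meta_data_size field in struct dpaa2_bp_info
(the helper name fd_to_mbuf_sketch is illustrative, not the driver's):

/* Without a valid rte_dpaa2_bpid_info, this indexing would fault in
 * a secondary process that never ran the mempool setup path. */
static inline struct rte_mbuf *
fd_to_mbuf_sketch(uint16_t bpid, void *buf_addr)
{
	struct dpaa2_bp_info *bpinfo = &rte_dpaa2_bpid_info[bpid];

	/* The mbuf header sits at a pool-specific offset in front of
	 * the data buffer; that offset comes from the table entry. */
	return (struct rte_mbuf *)((char *)buf_addr - bpinfo->meta_data_size);
}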