i40e/base: support pre-allocated pages for pd
author Jingjing Wu <jingjing.wu@intel.com>
Sun, 6 Sep 2015 07:11:44 +0000 (15:11 +0800)
committer Thomas Monjalon <thomas.monjalon@6wind.com>
Thu, 1 Oct 2015 23:35:23 +0000 (01:35 +0200)
The i40e_add_pd_table_entry() routine is modified to handle both the case
where a backing page is passed in and the case where the backing page is
allocated inside i40e_add_pd_table_entry().

For pble resource management, it is more efficient for the pble code to
manage its own backing pages. For a VF, the pble backing page addresses
will be sent to the PF driver for the pble resource.

i40e_remove_pd_bp() is also modified so that it does not free pre-allocated
pages and frees only the ones that were allocated in i40e_add_pd_table_entry().
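The caller-side flow below is a minimal sketch of the new contract, assuming the
i40e base driver headers; the helper name and error handling are illustrative and
not part of this commit. A page allocated by the caller is handed in through
rsrc_pg, the entry is marked rsrc_pg = true, and i40e_remove_pd_bp() leaves the
page for the caller to free; passing NULL keeps the old behaviour where the 4K
backing page is allocated and freed internally.

static enum i40e_status_code
example_use_prealloc_pd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
			u32 pd_index)
{
	struct i40e_dma_mem pd_page;
	enum i40e_status_code ret;

	/* the caller owns the backing page and allocates it up front */
	ret = i40e_allocate_dma_mem(hw, &pd_page, i40e_mem_bp,
				    I40E_HMC_PAGED_BP_SIZE,
				    I40E_HMC_PD_BP_BUF_ALIGNMENT);
	if (ret != I40E_SUCCESS)
		return ret;

	/* non-NULL rsrc_pg: the entry is marked rsrc_pg = true */
	ret = i40e_add_pd_table_entry(hw, hmc_info, pd_index, &pd_page);
	if (ret != I40E_SUCCESS)
		goto free_page;

	/* ... program and use the PD entry ... */

	/* does not free pd_page because the entry was pre-allocated */
	ret = i40e_remove_pd_bp(hw, hmc_info, pd_index);

free_page:
	/* the caller remains responsible for freeing its own page */
	i40e_free_dma_mem(hw, &pd_page);
	return ret;
}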

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Tested-by: Huilong Xu <huilongx.xu@intel.com>
drivers/net/i40e/base/i40e_hmc.c
drivers/net/i40e/base/i40e_hmc.h
drivers/net/i40e/base/i40e_lan_hmc.c

index 3b29251559e41e2dfaa74b4bf56fab5b7b8c88ec..75d38412c4681fbca611d839ae134d86780a4fa8 100644 (file)
@@ -127,6 +127,7 @@ exit:
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
  *
  * This function:
  *     1. Initializes the pd entry
@@ -140,12 +141,14 @@ exit:
  **/
 enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index)
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg)
 {
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_hmc_pd_table *pd_table;
        struct i40e_hmc_pd_entry *pd_entry;
        struct i40e_dma_mem mem;
+       struct i40e_dma_mem *page = &mem;
        u32 sd_idx, rel_pd_idx;
        u64 *pd_addr;
        u64 page_desc;
@@ -166,19 +169,25 @@ enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
        pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        pd_entry = &pd_table->pd_entry[rel_pd_idx];
        if (!pd_entry->valid) {
-               /* allocate a 4K backing page */
-               ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-                                                I40E_HMC_PAGED_BP_SIZE,
-                                                I40E_HMC_PD_BP_BUF_ALIGNMENT);
-               if (ret_code)
-                       goto exit;
+               if (rsrc_pg) {
+                       pd_entry->rsrc_pg = true;
+                       page = rsrc_pg;
+               } else {
+                       /* allocate a 4K backing page */
+                       ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+                                               I40E_HMC_PAGED_BP_SIZE,
+                                               I40E_HMC_PD_BP_BUF_ALIGNMENT);
+                       if (ret_code)
+                               goto exit;
+                       pd_entry->rsrc_pg = false;
+               }
 
-               i40e_memcpy(&pd_entry->bp.addr, &mem,
+               i40e_memcpy(&pd_entry->bp.addr, page,
                            sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
                pd_entry->bp.sd_pd_index = pd_index;
                pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
                /* Set page address and valid bit */
-               page_desc = mem.pa | 0x1;
+               page_desc = page->pa | 0x1;
 
                pd_addr = (u64 *)pd_table->pd_page_addr.va;
                pd_addr += rel_pd_idx;
@@ -253,7 +262,8 @@ enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
        I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
        /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+       if (!pd_entry->rsrc_pg)
+               ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
        if (I40E_SUCCESS != ret_code)
                goto exit;
        if (!pd_table->ref_cnt)
index c2cdc92381ddceed8b5d8c8f8b738d41bd4eb3f9..343b251fa0ae7b1b7101691f5d456c71947f3062 100644 (file)
@@ -69,6 +69,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
        struct i40e_hmc_bp bp;
        u32 sd_index;
+       bool rsrc_pg;
        bool valid;
 };
 
@@ -225,7 +226,8 @@ enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
 
 enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index);
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
 enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx);
index 3c717d2a46846706e378f4fe7bd53807730495d6..651176718c537dd92a815ac7463d96bceb733a6a 100644 (file)
@@ -394,7 +394,7 @@ enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
                                /* update the pd table entry */
                                ret_code = i40e_add_pd_table_entry(hw,
                                                                info->hmc_info,
-                                                               i);
+                                                               i, NULL);
                                if (I40E_SUCCESS != ret_code) {
                                        pd_error = true;
                                        break;