net/bnxt: support lack of huge pages
authorAjit Khaparde <ajit.khaparde@broadcom.com>
Thu, 1 Jun 2017 17:07:01 +0000 (12:07 -0500)
committerFerruh Yigit <ferruh.yigit@intel.com>
Mon, 12 Jun 2017 09:41:28 +0000 (10:41 +0100)
rte_malloc_virt2phy() does not return a physical address if huge pages
aren't in use.  Further, rte_memzone->phys_addr is not a physical address
when the memzone is not backed by huge pages.

Use rte_mem_virt2phy() and manually lock pages to support lack of
huge pages.

Also check the return value of rte_mem_virt2phy(): verify that the
function returns a valid physical address, and otherwise log an error
message and return an error to the caller.

Signed-off-by: Stephen Hurd <stephen.hurd@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_vnic.c

index 7c7c9ce..d2a8d8f 100644 (file)
@@ -489,8 +489,15 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto error;
                }
+               rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+                       rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+               if (bp->hwrm_cmd_resp_dma_addr == 0) {
+                       RTE_LOG(ERR, PMD,
+                       "Unable to map response buffer to physical memory.\n");
+                       rc = -ENOMEM;
+                       goto error;
+               }
                bp->max_resp_len = max_resp_len;
        }
 
@@ -1363,10 +1370,16 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
+       rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
-               rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+               rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+       if (bp->hwrm_cmd_resp_dma_addr == 0) {
+               RTE_LOG(ERR, PMD,
+                       "unable to map response address to physical memory\n");
+               return -ENOMEM;
+       }
        rte_spinlock_init(&bp->hwrm_lock);
 
        return 0;
index c1698ea..5e4236a 100644 (file)
@@ -32,6 +32,7 @@
  */
 
 #include <rte_memzone.h>
+#include <unistd.h>
 
 #include "bnxt.h"
 #include "bnxt_cpr.h"
@@ -96,6 +97,8 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
        struct rte_pci_device *pdev = bp->pdev;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
+       phys_addr_t mz_phys_addr;
+       int sz;
 
        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
@@ -136,21 +139,37 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
        mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
        mz = rte_memzone_lookup(mz_name);
        if (!mz) {
-               mz = rte_memzone_reserve(mz_name, total_alloc_len,
+               mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
                                         SOCKET_ID_ANY,
                                         RTE_MEMZONE_2MB |
-                                        RTE_MEMZONE_SIZE_HINT_ONLY);
+                                        RTE_MEMZONE_SIZE_HINT_ONLY,
+                                        getpagesize());
                if (mz == NULL)
                        return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
+       mz_phys_addr = mz->phys_addr;
+       if ((unsigned long)mz->addr == mz_phys_addr) {
+               RTE_LOG(WARNING, PMD,
+                       "Memzone physical address same as virtual.\n");
+               RTE_LOG(WARNING, PMD,
+                       "Using rte_mem_virt2phy()\n");
+               for (sz = 0; sz < total_alloc_len; sz += getpagesize())
+                       rte_mem_lock_page(((char *)mz->addr) + sz);
+               mz_phys_addr = rte_mem_virt2phy(mz->addr);
+               if (mz_phys_addr == 0) {
+                       RTE_LOG(ERR, PMD,
+                       "unable to map ring address to physical memory\n");
+                       return -ENOMEM;
+               }
+       }
 
        if (tx_ring_info) {
                tx_ring = tx_ring_info->tx_ring_struct;
 
                tx_ring->bd = ((char *)mz->addr + tx_ring_start);
                tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
-               tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
+               tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
                tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
                tx_ring->mem_zone = (const void *)mz;
 
@@ -170,7 +189,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                rx_ring->bd = ((char *)mz->addr + rx_ring_start);
                rx_ring_info->rx_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
-               rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
+               rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
                rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;
 
@@ -185,7 +204,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
        }
 
        cp_ring->bd = ((char *)mz->addr + cp_ring_start);
-       cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
+       cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
        cp_ring_info->cp_desc_ring = cp_ring->bd;
        cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
        cp_ring->mem_zone = (const void *)mz;
@@ -196,7 +215,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                *cp_ring->vmem = ((char *)mz->addr + stats_len);
        if (stats_len) {
                cp_ring_info->hw_stats = mz->addr;
-               cp_ring_info->hw_stats_map = mz->phys_addr;
+               cp_ring_info->hw_stats_map = mz_phys_addr;
        }
        cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        return 0;
index 4e378a9..fe2244b 100644 (file)
@@ -173,6 +173,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
                                HW_HASH_KEY_SIZE);
        uint16_t max_vnics;
        int i;
+       phys_addr_t mz_phys_addr;
 
        max_vnics = bp->max_vnics;
        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@@ -189,6 +190,19 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
                if (!mz)
                        return -ENOMEM;
        }
+       mz_phys_addr = mz->phys_addr;
+       if ((unsigned long)mz->addr == mz_phys_addr) {
+               RTE_LOG(WARNING, PMD,
+                       "Memzone physical address same as virtual.\n");
+               RTE_LOG(WARNING, PMD,
+                       "Using rte_mem_virt2phy()\n");
+               mz_phys_addr = rte_mem_virt2phy(mz->addr);
+               if (mz_phys_addr == 0) {
+                       RTE_LOG(ERR, PMD,
+                       "unable to map vnic address to physical memory\n");
+                       return -ENOMEM;
+               }
+       }
 
        for (i = 0; i < max_vnics; i++) {
                vnic = &bp->vnic_info[i];
@@ -198,7 +212,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
                        (void *)((char *)mz->addr + (entry_length * i));
                memset(vnic->rss_table, -1, entry_length);
 
-               vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i);
+               vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
                vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
                             HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));