drivers: replace page size definitions with function
author    Thomas Monjalon <thomas@monjalon.net>
          Thu, 25 Feb 2021 00:07:56 +0000 (01:07 +0100)
committer Thomas Monjalon <thomas@monjalon.net>
          Tue, 23 Mar 2021 07:41:05 +0000 (08:41 +0100)
The page size is often retrieved from the macro PAGE_SIZE.
If PAGE_SIZE is not defined, drivers either fall back to a hard-coded
default or query the system value with the UNIX-only function sysconf().

Such definitions are replaced with the generic function
rte_mem_page_size(), which is defined for each supported OS.

Removing the PAGE_SIZE definitions also fixes the dlb drivers for musl libc:
they defined the macro without an #ifndef guard, causing redefinition errors.
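
For illustration, the removed fallback and its replacement look roughly
like this (a representative sketch, not verbatim from any single file below):

    /* Before: per-driver fallback, UNIX-only */
    #include <unistd.h>       /* sysconf() */
    #ifndef PAGE_SIZE
    #define PAGE_SIZE (sysconf(_SC_PAGESIZE))
    #endif

    /* After: OS-independent EAL paging API */
    #include <rte_eal_paging.h>

    size_t pg_sz = rte_mem_page_size();

Unlike the macro, rte_mem_page_size() cannot collide with a C library
definition and returns the run-time page size on every supported OS.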

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Boyer <aboyer@pensando.io>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: David Marchand <david.marchand@redhat.com>
Acked-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
26 files changed:
drivers/bus/pci/linux/pci_vfio.c
drivers/bus/vmbus/linux/vmbus_uio.c
drivers/bus/vmbus/private.h
drivers/bus/vmbus/rte_vmbus_reg.h
drivers/bus/vmbus/vmbus_common_uio.c
drivers/crypto/virtio/virtio_pci.h
drivers/event/dlb/dlb.c
drivers/event/dlb/dlb_priv.h
drivers/event/dlb/pf/base/dlb_osdep.h
drivers/event/dlb/pf/dlb_main.h
drivers/event/dlb/pf/dlb_pf.c
drivers/event/dlb2/dlb2_priv.h
drivers/event/dlb2/pf/base/dlb2_osdep.h
drivers/event/dlb2/pf/dlb2_main.h
drivers/event/dlb2/pf/dlb2_pf.c
drivers/net/bnx2x/ecore_fw_defs.h
drivers/net/ionic/ionic_lif.c
drivers/net/ionic/ionic_main.c
drivers/net/ionic/ionic_osdep.h
drivers/net/netvsc/hn_rndis.c
drivers/net/netvsc/hn_rxtx.c
drivers/net/netvsc/hn_var.h
drivers/net/virtio/virtio.h
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_ethdev.h
drivers/vdpa/ifc/ifcvf_vdpa.c

diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index e3f7b6a..9d689d6 100644
 
 #ifdef VFIO_PRESENT
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE   (sysconf(_SC_PAGESIZE))
-#endif
-#define PAGE_MASK   (~(PAGE_SIZE - 1))
-
 static struct rte_tailq_elem rte_vfio_tailq = {
        .name = "VFIO_RESOURCE_LIST",
 };
@@ -507,8 +502,8 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
                 */
                uint32_t table_start = msix_table->offset;
                uint32_t table_end = table_start + msix_table->size;
-               table_end = RTE_ALIGN(table_end, PAGE_SIZE);
-               table_start = RTE_ALIGN_FLOOR(table_start, PAGE_SIZE);
+               table_end = RTE_ALIGN(table_end, rte_mem_page_size());
+               table_start = RTE_ALIGN_FLOOR(table_start, rte_mem_page_size());
 
                /* If page-aligned start of MSI-X table is less than the
                 * actual MSI-X table start address, reassign to the actual
diff --git a/drivers/bus/vmbus/linux/vmbus_uio.c b/drivers/bus/vmbus/linux/vmbus_uio.c
index 5dc0c47..b52ca5b 100644
@@ -154,7 +154,7 @@ vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int idx,
                vmbus_map_addr = vmbus_find_max_end_va();
 
        /* offset is special in uio it indicates which resource */
-       offset = idx * PAGE_SIZE;
+       offset = idx * rte_mem_page_size();
 
        mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags);
        close(fd);
@@ -224,7 +224,7 @@ static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
        }
        file_size = sb.st_size;
 
-       if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) {
+       if (file_size == 0 || (file_size & (rte_mem_page_size() - 1))) {
                VMBUS_LOG(ERR, "incorrect size %s: %zu",
                          ring_path, file_size);
 
diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h
index f19b14e..528d60a 100644
@@ -9,13 +9,10 @@
 #include <stdbool.h>
 #include <sys/uio.h>
 #include <rte_log.h>
+#include <rte_eal_paging.h>
 #include <rte_vmbus_reg.h>
 #include <rte_bus_vmbus.h>
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE      4096
-#endif
-
 extern struct rte_vmbus_bus rte_vmbus_bus;
 
 extern int vmbus_logtype_bus;
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
index f5a0693..8562672 100644
@@ -100,7 +100,7 @@ struct vmbus_bufring {
                uint32_t value;
        } feature_bits;
 
-       /* Pad it to PAGE_SIZE so that data starts on page boundary */
+       /* Pad it to rte_mem_page_size() so that data starts on page boundary */
        uint8_t reserved2[4028];
 
        /*
diff --git a/drivers/bus/vmbus/vmbus_common_uio.c b/drivers/bus/vmbus/vmbus_common_uio.c
index a689bf1..8582e32 100644
@@ -63,7 +63,7 @@ vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
 
        for (i = 0; i != uio_res->nb_maps; i++) {
                void *mapaddr;
-               off_t offset = i * PAGE_SIZE;
+               off_t offset = i * rte_mem_page_size();
 
                mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
                                             fd, offset,
@@ -175,7 +175,7 @@ vmbus_uio_map_resource(struct rte_vmbus_device *dev)
        }
 
        dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr
-                                    + (PAGE_SIZE >> 1));
+                                    + (rte_mem_page_size() >> 1));
        dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr;
        return 0;
 }
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
index d9a214d..0a7ea1b 100644
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 
+#include <rte_eal_paging.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
 #include <rte_cryptodev.h>
@@ -67,7 +68,7 @@ struct virtqueue;
  *
  * Note the sizeof(struct vring_desc) is 16 bytes.
  */
-#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+#define VIRTIO_MAX_INDIRECT ((int) (rte_mem_page_size() / 16))
 
 /* Do we get callbacks when the ring is completely used, even if we've
  * suppressed them?
diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 8b26d1d..1174ab3 100644
@@ -23,6 +23,7 @@
 #include <rte_io.h>
 #include <rte_kvargs.h>
 #include <rte_log.h>
+#include <rte_eal_paging.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_power_intrinsics.h>
@@ -991,7 +992,8 @@ dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
                goto error_exit;
        }
 
-       qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
+       qm_port->pp_mmio_base = DLB_LDB_PP_BASE +
+                       rte_mem_page_size() * qm_port_id;
        qm_port->id = qm_port_id;
 
        /* The credit window is one high water mark of QEs */
@@ -1181,7 +1183,8 @@ dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
                goto error_exit;
        }
 
-       qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
+       qm_port->pp_mmio_base = DLB_DIR_PP_BASE +
+                       rte_mem_page_size() * qm_port_id;
        qm_port->id = qm_port_id;
 
        /* The credit window is one high water mark of QEs */
diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
index 272e174..ca4d6a8 100644
@@ -76,8 +76,6 @@
 
 #define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)
 
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-
 #define DLB_NUM_QES_PER_CACHE_LINE 4
 
 #define DLB_MAX_ENQUEUE_DEPTH 64
diff --git a/drivers/event/dlb/pf/base/dlb_osdep.h b/drivers/event/dlb/pf/base/dlb_osdep.h
index 0c119b7..dee2066 100644
@@ -92,7 +92,7 @@ static inline void *os_map_producer_port(struct dlb_hw *hw,
 
 
        pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
-       addr = (pp_dma_base + (PAGE_SIZE * port_id));
+       addr = (pp_dma_base + (rte_mem_page_size() * port_id));
 
        return (void *)(uintptr_t)addr;
 
diff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h
index 22e2152..e66ba22 100644
 #include <rte_spinlock.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
-
-#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-#endif
+#include <rte_eal_paging.h>
 
 #include "base/dlb_hw_types.h"
 #include "../dlb_user.h"
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index 3aeef6f..2f7e30a 100644
@@ -303,7 +303,7 @@ dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
        alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
 
        port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
-                                              alloc_sz, PAGE_SIZE);
+                                              alloc_sz, rte_mem_page_size());
        if (port_base == NULL)
                return -ENOMEM;
 
@@ -328,7 +328,8 @@ dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
 
        pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
        dlb_port[response.id][DLB_LDB].pp_addr =
-               (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+                       (void *)(uintptr_t)(pp_dma_base +
+                       (rte_mem_page_size() * response.id));
 
        dlb_port[response.id][DLB_LDB].cq_base =
                (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
@@ -381,7 +382,7 @@ dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
        alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
 
        port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
-                                              alloc_sz, PAGE_SIZE);
+                                              alloc_sz, rte_mem_page_size());
        if (port_base == NULL)
                return -ENOMEM;
 
@@ -406,7 +407,8 @@ dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
 
        pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
        dlb_port[response.id][DLB_DIR].pp_addr =
-               (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+                       (void *)(uintptr_t)(pp_dma_base +
+                       (rte_mem_page_size() * response.id));
 
        dlb_port[response.id][DLB_DIR].cq_base =
                (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index b73cf3f..eb1a932 100644
@@ -78,8 +78,6 @@
                                    DLB2_LDB_CQ_MAX_SIZE)
 #define PP_BASE(is_dir) ((is_dir) ? DLB2_DIR_PP_BASE : DLB2_LDB_PP_BASE)
 
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-
 #define DLB2_NUM_QES_PER_CACHE_LINE 4
 
 #define DLB2_MAX_ENQUEUE_DEPTH 64
diff --git a/drivers/event/dlb2/pf/base/dlb2_osdep.h b/drivers/event/dlb2/pf/base/dlb2_osdep.h
index c4c34eb..aa101a4 100644
@@ -89,7 +89,7 @@ static inline void *os_map_producer_port(struct dlb2_hw *hw,
        uint64_t pp_dma_base;
 
        pp_dma_base = (uintptr_t)hw->func_kva + DLB2_PP_BASE(is_ldb);
-       addr = (pp_dma_base + (PAGE_SIZE * port_id));
+       addr = (pp_dma_base + (rte_mem_page_size() * port_id));
 
        return (void *)(uintptr_t)addr;
 }
diff --git a/drivers/event/dlb2/pf/dlb2_main.h b/drivers/event/dlb2/pf/dlb2_main.h
index f3bee71..9eeda48 100644
 #include <rte_spinlock.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
-
-#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-#endif
+#include <rte_eal_paging.h>
 
 #include "base/dlb2_hw_types.h"
 #include "../dlb2_user.h"
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index 1a7d8fc..1142da5 100644
@@ -284,7 +284,7 @@ dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
        alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
 
        port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
-                                               PAGE_SIZE);
+                                               rte_mem_page_size());
        if (port_base == NULL)
                return -ENOMEM;
 
@@ -307,7 +307,7 @@ dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
 
        pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
        dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
-               (void *)(pp_base + (PAGE_SIZE * response.id));
+               (void *)(pp_base + (rte_mem_page_size() * response.id));
 
        dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
        memset(&port_memory, 0, sizeof(port_memory));
@@ -359,7 +359,7 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
        alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
 
        port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
-                                               PAGE_SIZE);
+                                               rte_mem_page_size());
        if (port_base == NULL)
                return -ENOMEM;
 
@@ -382,7 +382,7 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
 
        pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
        dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
-               (void *)(pp_base + (PAGE_SIZE * response.id));
+               (void *)(pp_base + (rte_mem_page_size() * response.id));
 
        dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
                (void *)(port_base);
diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h
index 5397a70..93bca8a 100644
@@ -13,6 +13,8 @@
 #ifndef ECORE_FW_DEFS_H
 #define ECORE_FW_DEFS_H
 
+#include <rte_eal_paging.h>
+
 #define CSTORM_ASSERT_LIST_INDEX_OFFSET        (IRO[152].base)
 #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
        (IRO[151].base + ((assertListEntry) * IRO[151].m1))
 #define X_ETH_LOCAL_RING_SIZE 13
 #define FIRST_BD_IN_PKT        0
 #define PARSE_BD_INDEX 1
-#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define NUM_OF_ETH_BDS_IN_PAGE \
+       (rte_mem_page_size() / (STRUCT_SIZE(eth_tx_bd) / 8))
 #define U_ETH_NUM_OF_SGES_TO_FETCH 8
 #define U_ETH_MAX_SGES_FOR_PACKET 3
 
 #define IP_HEADER_ALIGNMENT_PADDING 2
 
 #define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
-       (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+       (0xFFFF - ((rte_mem_page_size() / ((STRUCT_SIZE(eth_rx_sge)) / 8)) - 1))
 
-#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
-#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
-#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
+#define TU_ETH_CQES_PER_PAGE \
+       (rte_mem_page_size() / (STRUCT_SIZE(eth_rx_cqe) / 8))
+#define U_ETH_BDS_PER_PAGE \
+       (rte_mem_page_size() / (STRUCT_SIZE(eth_rx_bd) / 8))
+#define U_ETH_SGES_PER_PAGE \
+       (rte_mem_page_size() / (STRUCT_SIZE(eth_rx_sge) / 8))
 
 #define U_ETH_BDS_PER_PAGE_MASK        (U_ETH_BDS_PER_PAGE-1)
 #define U_ETH_CQE_PER_PAGE_MASK        (TU_ETH_CQES_PER_PAGE-1)
 
 /* Event Ring definitions */
 #define C_ERES_PER_PAGE \
-       (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+       (rte_mem_page_size() / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
 #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
 
 /* number of statistic command */
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index cd220ab..431eda7 100644
@@ -612,18 +612,18 @@ ionic_qcq_alloc(struct ionic_lif *lif,
        cq_size = num_descs * cq_desc_size;
        sg_size = num_descs * sg_desc_size;
 
-       total_size = RTE_ALIGN(q_size, PAGE_SIZE) +
-               RTE_ALIGN(cq_size, PAGE_SIZE);
+       total_size = RTE_ALIGN(q_size, rte_mem_page_size()) +
+                       RTE_ALIGN(cq_size, rte_mem_page_size());
        /*
         * Note: aligning q_size/cq_size is not enough due to cq_base address
         * aligning as q_base could be not aligned to the page.
-        * Adding PAGE_SIZE.
+        * Adding rte_mem_page_size().
         */
-       total_size += PAGE_SIZE;
+       total_size += rte_mem_page_size();
 
        if (flags & IONIC_QCQ_F_SG) {
-               total_size += RTE_ALIGN(sg_size, PAGE_SIZE);
-               total_size += PAGE_SIZE;
+               total_size += RTE_ALIGN(sg_size, rte_mem_page_size());
+               total_size += rte_mem_page_size();
        }
 
        new = rte_zmalloc("ionic", struct_size, 0);
@@ -636,7 +636,7 @@ ionic_qcq_alloc(struct ionic_lif *lif,
 
        new->q.info = rte_calloc_socket("ionic",
                                num_descs, sizeof(void *),
-                               PAGE_SIZE, socket_id);
+                               rte_mem_page_size(), socket_id);
        if (!new->q.info) {
                IONIC_PRINT(ERR, "Cannot allocate queue info");
                err = -ENOMEM;
@@ -673,13 +673,16 @@ ionic_qcq_alloc(struct ionic_lif *lif,
        q_base = new->base;
        q_base_pa = new->base_pa;
 
-       cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
-       cq_base_pa = RTE_ALIGN(q_base_pa + q_size, PAGE_SIZE);
+       cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size,
+                       rte_mem_page_size());
+       cq_base_pa = RTE_ALIGN(q_base_pa + q_size,
+                       rte_mem_page_size());
 
        if (flags & IONIC_QCQ_F_SG) {
                sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
-                       PAGE_SIZE);
-               sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
+                               rte_mem_page_size());
+               sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size,
+                               rte_mem_page_size());
                ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
        }
 
@@ -1002,7 +1005,7 @@ ionic_lif_alloc(struct ionic_lif *lif)
 
        IONIC_PRINT(DEBUG, "Allocating Lif Info");
 
-       lif->info_sz = RTE_ALIGN(sizeof(*lif->info), PAGE_SIZE);
+       lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());
 
        lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
                "lif_info", 0 /* queue_idx*/,
diff --git a/drivers/net/ionic/ionic_main.c b/drivers/net/ionic/ionic_main.c
index 9aa7b2e..7301f53 100644
@@ -456,7 +456,8 @@ ionic_port_init(struct ionic_adapter *adapter)
        if (idev->port_info)
                return 0;
 
-       idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
+       idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info),
+                       rte_mem_page_size());
 
        snprintf(z_name, sizeof(z_name), "%s_port_%s_info",
                IONIC_DRV_NAME, adapter->name);
diff --git a/drivers/net/ionic/ionic_osdep.h b/drivers/net/ionic/ionic_osdep.h
index a6575c3..89ed106 100644
@@ -17,6 +17,7 @@
 #include <rte_byteorder.h>
 #include <rte_io.h>
 #include <rte_memory.h>
+#include <rte_eal_paging.h>
 
 #include "ionic_logs.h"
 
@@ -25,7 +26,6 @@
 
 #ifndef PAGE_SHIFT
 #define PAGE_SHIFT      12
-#define PAGE_SIZE       (1 << PAGE_SHIFT)
 #endif
 
 #define __iomem
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index e317539..e3f7e63 100644
@@ -67,7 +67,7 @@ hn_rndis_rid(struct hn_data *hv)
 
 static void *hn_rndis_alloc(size_t size)
 {
-       return rte_zmalloc("RNDIS", size, PAGE_SIZE);
+       return rte_zmalloc("RNDIS", size, rte_mem_page_size());
 }
 
 #ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
@@ -265,17 +265,17 @@ static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
                return -EINVAL;
        }
 
-       if (unlikely(reqlen > PAGE_SIZE)) {
+       if (unlikely(reqlen > rte_mem_page_size())) {
                PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size",
                            reqlen);
                return -EINVAL;
        }
 
-       sg.page = addr / PAGE_SIZE;
+       sg.page = addr / rte_mem_page_size();
        sg.ofs  = addr & PAGE_MASK;
        sg.len  = reqlen;
 
-       if (sg.ofs + reqlen >  PAGE_SIZE) {
+       if (sg.ofs + reqlen >  rte_mem_page_size()) {
                PMD_DRV_LOG(ERR, "RNDIS request crosses page boundary");
                return -EINVAL;
        }
@@ -479,7 +479,7 @@ hn_rndis_query(struct hn_data *hv, uint32_t oid,
                return -ENOMEM;
 
        comp_len = sizeof(*comp) + odlen;
-       comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE);
+       comp = rte_zmalloc("QUERY", comp_len, rte_mem_page_size());
        if (!comp) {
                error = -ENOMEM;
                goto done;
@@ -736,7 +736,7 @@ hn_rndis_set(struct hn_data *hv, uint32_t oid, const void *data, uint32_t dlen)
        int error;
 
        reqlen = sizeof(*req) + dlen;
-       req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE);
+       req = rte_zmalloc("RNDIS_SET", reqlen, rte_mem_page_size());
        if (!req)
                return -ENOMEM;
 
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 0f4ef01..c6bf7cc 100644
@@ -1387,7 +1387,8 @@ static unsigned int hn_get_slots(const struct rte_mbuf *m)
                unsigned int size = rte_pktmbuf_data_len(m);
                unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
 
-               slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+               slots += (offs + size + rte_mem_page_size() - 1) /
+                               rte_mem_page_size();
                m = m->next;
        }
 
@@ -1402,12 +1403,13 @@ static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
 
        while (m) {
                rte_iova_t addr = rte_mbuf_data_iova(m);
-               unsigned int page = addr / PAGE_SIZE;
+               unsigned int page = addr / rte_mem_page_size();
                unsigned int offset = addr & PAGE_MASK;
                unsigned int len = rte_pktmbuf_data_len(m);
 
                while (len > 0) {
-                       unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+                       unsigned int bytes = RTE_MIN(len,
+                                       rte_mem_page_size() - offset);
 
                        sg[segs].page = page;
                        sg[segs].ofs = offset;
@@ -1450,7 +1452,7 @@ static int hn_xmit_sg(struct hn_tx_queue *txq,
        addr = txq->tx_rndis_iova +
                ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
 
-       sg[0].page = addr / PAGE_SIZE;
+       sg[0].page = addr / rte_mem_page_size();
        sg[0].ofs = addr & PAGE_MASK;
        sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
        segs = 1;
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index b7405ca..4364240 100644
@@ -6,6 +6,8 @@
  * All rights reserved.
  */
 
+#include <rte_eal_paging.h>
+
 /*
  * Tunable ethdev params
  */
 
 #define HN_RX_EXTMBUF_ENABLE   0
 
-/* Buffers need to be aligned */
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
 #ifndef PAGE_MASK
-#define PAGE_MASK (PAGE_SIZE - 1)
+#define PAGE_MASK (rte_mem_page_size() - 1)
 #endif
 
 struct hn_data;
diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
index 21d5490..2c987d1 100644
@@ -98,7 +98,7 @@
  *
  * Note the sizeof(struct vring_desc) is 16 bytes.
  */
-#define VIRTIO_MAX_INDIRECT ((int)(PAGE_SIZE / 16))
+#define VIRTIO_MAX_INDIRECT ((int)(rte_mem_page_size() / 16))
 
 /*
  * Maximum number of virtqueues per device.
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 333a524..289b240 100644
@@ -21,8 +21,8 @@
 #include <rte_errno.h>
 #include <rte_cpuflags.h>
 #include <rte_vect.h>
-
 #include <rte_memory.h>
+#include <rte_eal_paging.h>
 #include <rte_eal.h>
 #include <rte_dev.h>
 #include <rte_cycles.h>
@@ -469,7 +469,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
                sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
        } else if (queue_type == VTNET_CQ) {
                /* Allocate a page for control vq command, data and status */
-               sz_hdr_mz = PAGE_SIZE;
+               sz_hdr_mz = rte_mem_page_size();
        }
 
        vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
@@ -568,7 +568,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
                cvq->mz = mz;
                cvq->virtio_net_hdr_mz = hdr_mz;
                cvq->virtio_net_hdr_mem = hdr_mz->iova;
-               memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
+               memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
 
                hw->cvq = cvq;
        }
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 873327d..5a501e7 100644
 
 #include "virtio.h"
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
 #define VIRTIO_MAX_RX_QUEUES 128U
 #define VIRTIO_MAX_TX_QUEUES 128U
 #define VIRTIO_MAX_MAC_ADDRS 64
diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
index 6a1b44b..39237ae 100644
@@ -11,6 +11,7 @@
 #include <linux/virtio_net.h>
 #include <stdbool.h>
 
+#include <rte_eal_paging.h>
 #include <rte_malloc.h>
 #include <rte_memory.h>
 #include <rte_bus_pci.h>
@@ -30,10 +31,6 @@ RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.net.ifcvf_vdpa, NOTICE);
        rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
                "IFCVF %s(): " fmt "\n", __func__, ##args)
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
 #define IFCVF_USED_RING_LEN(size) \
        ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
 
@@ -600,11 +597,11 @@ m_ifcvf_start(struct ifcvf_internal *internal)
        for (i = 0; i < nr_vring; i++) {
                rte_vhost_get_vhost_vring(vid, i, &vq);
 
-               size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
-                               PAGE_SIZE);
-               vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
+               size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
+                               rte_mem_page_size());
+               vring_buf = rte_zmalloc("ifcvf", size, rte_mem_page_size());
                vring_init(&internal->m_vring[i], vq.size, vring_buf,
-                               PAGE_SIZE);
+                               rte_mem_page_size());
 
                ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
                        (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
@@ -686,8 +683,8 @@ m_ifcvf_stop(struct ifcvf_internal *internal)
                len = IFCVF_USED_RING_LEN(vq.size);
                rte_vhost_log_used_vring(vid, i, 0, len);
 
-               size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
-                               PAGE_SIZE);
+               size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
+                               rte_mem_page_size());
                rte_vfio_container_dma_unmap(internal->vfio_container_fd,
                        (uint64_t)(uintptr_t)internal->m_vring[i].desc,
                        m_vring_iova, size);