The page size is often retrieved from the macro PAGE_SIZE.
If PAGE_SIZE is not defined, it is defined either as a hard-coded default
or from the system value returned by the UNIX-only function sysconf().
Such definitions are replaced with the generic function
rte_mem_page_size() defined for each supported OS.
Removing the PAGE_SIZE definitions also fixes the dlb drivers for musl libc,
where the missing #ifndef guards caused redefinition errors.
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Boyer <aboyer@pensando.io>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: David Marchand <david.marchand@redhat.com>
Acked-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
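
Editor's note: every hunk below follows the same pattern, replacing a
compile-time PAGE_SIZE macro (hard-coded or wrapping sysconf()) with a
run-time call to rte_mem_page_size() from <rte_eal_paging.h>. A minimal
sketch of the idiom, not part of the patch; the helper name is
hypothetical and only illustrates the page-alignment usage seen in the
diff:

/*
 * Hypothetical helper, for illustration only (not part of this patch):
 * round a buffer length up to a whole number of pages using the
 * run-time page size instead of a PAGE_SIZE macro.
 */
#include <stddef.h>
#include <rte_common.h>      /* RTE_ALIGN */
#include <rte_eal_paging.h>  /* rte_mem_page_size() */

static inline size_t
example_page_aligned_len(size_t len)
{
	/* Page size queried from the OS, available on every EAL target. */
	size_t pg = rte_mem_page_size();

	return RTE_ALIGN(len, pg);
}
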
#ifdef VFIO_PRESENT
-#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-#endif
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-
static struct rte_tailq_elem rte_vfio_tailq = {
.name = "VFIO_RESOURCE_LIST",
};
*/
uint32_t table_start = msix_table->offset;
uint32_t table_end = table_start + msix_table->size;
- table_end = RTE_ALIGN(table_end, PAGE_SIZE);
- table_start = RTE_ALIGN_FLOOR(table_start, PAGE_SIZE);
+ table_end = RTE_ALIGN(table_end, rte_mem_page_size());
+ table_start = RTE_ALIGN_FLOOR(table_start, rte_mem_page_size());
/* If page-aligned start of MSI-X table is less than the
* actual MSI-X table start address, reassign to the actual
vmbus_map_addr = vmbus_find_max_end_va();
/* offset is special in uio it indicates which resource */
- offset = idx * PAGE_SIZE;
+ offset = idx * rte_mem_page_size();
mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags);
close(fd);
}
file_size = sb.st_size;
- if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) {
+ if (file_size == 0 || (file_size & (rte_mem_page_size() - 1))) {
VMBUS_LOG(ERR, "incorrect size %s: %zu",
ring_path, file_size);
#include <stdbool.h>
#include <sys/uio.h>
#include <rte_log.h>
+#include <rte_eal_paging.h>
#include <rte_vmbus_reg.h>
#include <rte_bus_vmbus.h>
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
extern struct rte_vmbus_bus rte_vmbus_bus;
extern int vmbus_logtype_bus;
uint32_t value;
} feature_bits;
- /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ /* Pad it to rte_mem_page_size() so that data starts on page boundary */
uint8_t reserved2[4028];
/*
for (i = 0; i != uio_res->nb_maps; i++) {
void *mapaddr;
- off_t offset = i * PAGE_SIZE;
+ off_t offset = i * rte_mem_page_size();
mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
fd, offset,
}
dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr
- + (PAGE_SIZE >> 1));
+ + (rte_mem_page_size() >> 1));
dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr;
return 0;
}
#include <stdint.h>
+#include <rte_eal_paging.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
*
* Note the sizeof(struct vring_desc) is 16 bytes.
*/
-#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+#define VIRTIO_MAX_INDIRECT ((int) (rte_mem_page_size() / 16))
/* Do we get callbacks when the ring is completely used, even if we've
* suppressed them?
#include <rte_io.h>
#include <rte_kvargs.h>
#include <rte_log.h>
+#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_power_intrinsics.h>
goto error_exit;
}
- qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
+ qm_port->pp_mmio_base = DLB_LDB_PP_BASE +
+ rte_mem_page_size() * qm_port_id;
qm_port->id = qm_port_id;
/* The credit window is one high water mark of QEs */
goto error_exit;
}
- qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
+ qm_port->pp_mmio_base = DLB_DIR_PP_BASE +
+ rte_mem_page_size() * qm_port_id;
qm_port->id = qm_port_id;
/* The credit window is one high water mark of QEs */
#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-
#define DLB_NUM_QES_PER_CACHE_LINE 4
#define DLB_MAX_ENQUEUE_DEPTH 64
pp_dma_base = (uintptr_t)hw->func_kva + DLB_PP_BASE(is_ldb);
- addr = (pp_dma_base + (PAGE_SIZE * port_id));
+ addr = (pp_dma_base + (rte_mem_page_size() * port_id));
return (void *)(uintptr_t)addr;
#include <rte_spinlock.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-
-#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-#endif
+#include <rte_eal_paging.h>
#include "base/dlb_hw_types.h"
#include "../dlb_user.h"
alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
- alloc_sz, PAGE_SIZE);
+ alloc_sz, rte_mem_page_size());
if (port_base == NULL)
return -ENOMEM;
pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
dlb_port[response.id][DLB_LDB].pp_addr =
- (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+ (void *)(uintptr_t)(pp_dma_base +
+ (rte_mem_page_size() * response.id));
dlb_port[response.id][DLB_LDB].cq_base =
(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
- alloc_sz, PAGE_SIZE);
+ alloc_sz, rte_mem_page_size());
if (port_base == NULL)
return -ENOMEM;
pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
dlb_port[response.id][DLB_DIR].pp_addr =
- (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+ (void *)(uintptr_t)(pp_dma_base +
+ (rte_mem_page_size() * response.id));
dlb_port[response.id][DLB_DIR].cq_base =
(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
DLB2_LDB_CQ_MAX_SIZE)
#define PP_BASE(is_dir) ((is_dir) ? DLB2_DIR_PP_BASE : DLB2_LDB_PP_BASE)
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-
#define DLB2_NUM_QES_PER_CACHE_LINE 4
#define DLB2_MAX_ENQUEUE_DEPTH 64
uint64_t pp_dma_base;
pp_dma_base = (uintptr_t)hw->func_kva + DLB2_PP_BASE(is_ldb);
- addr = (pp_dma_base + (PAGE_SIZE * port_id));
+ addr = (pp_dma_base + (rte_mem_page_size() * port_id));
return (void *)(uintptr_t)addr;
}
#include <rte_spinlock.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-
-#ifndef PAGE_SIZE
-#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
-#endif
+#include <rte_eal_paging.h>
#include "base/dlb2_hw_types.h"
#include "../dlb2_user.h"
alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
- PAGE_SIZE);
+ rte_mem_page_size());
if (port_base == NULL)
return -ENOMEM;
pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
- (void *)(pp_base + (PAGE_SIZE * response.id));
+ (void *)(pp_base + (rte_mem_page_size() * response.id));
dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
memset(&port_memory, 0, sizeof(port_memory));
alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
- PAGE_SIZE);
+ rte_mem_page_size());
if (port_base == NULL)
return -ENOMEM;
pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
- (void *)(pp_base + (PAGE_SIZE * response.id));
+ (void *)(pp_base + (rte_mem_page_size() * response.id));
dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
(void *)(port_base);
#ifndef ECORE_FW_DEFS_H
#define ECORE_FW_DEFS_H
+#include <rte_eal_paging.h>
+
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[151].base + ((assertListEntry) * IRO[151].m1))
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
#define PARSE_BD_INDEX 1
-#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define NUM_OF_ETH_BDS_IN_PAGE \
+ (rte_mem_page_size() / (STRUCT_SIZE(eth_tx_bd) / 8))
#define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3
#define IP_HEADER_ALIGNMENT_PADDING 2
#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
- (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+ (0xFFFF - ((rte_mem_page_size() / ((STRUCT_SIZE(eth_rx_sge)) / 8)) - 1))
-#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
-#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
-#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
+#define TU_ETH_CQES_PER_PAGE \
+ (rte_mem_page_size() / (STRUCT_SIZE(eth_rx_cqe) / 8))
+#define U_ETH_BDS_PER_PAGE \
+ (rte_mem_page_size() / (STRUCT_SIZE(eth_rx_bd) / 8))
+#define U_ETH_SGES_PER_PAGE \
+ (rte_mem_page_size() / (STRUCT_SIZE(eth_rx_sge) / 8))
#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
/* Event Ring definitions */
#define C_ERES_PER_PAGE \
- (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+ (rte_mem_page_size() / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
/* number of statistic command */
cq_size = num_descs * cq_desc_size;
sg_size = num_descs * sg_desc_size;
- total_size = RTE_ALIGN(q_size, PAGE_SIZE) +
- RTE_ALIGN(cq_size, PAGE_SIZE);
+ total_size = RTE_ALIGN(q_size, rte_mem_page_size()) +
+ RTE_ALIGN(cq_size, rte_mem_page_size());
/*
* Note: aligning q_size/cq_size is not enough due to cq_base address
* aligning as q_base could be not aligned to the page.
- * Adding PAGE_SIZE.
+ * Adding rte_mem_page_size().
*/
- total_size += PAGE_SIZE;
+ total_size += rte_mem_page_size();
if (flags & IONIC_QCQ_F_SG) {
- total_size += RTE_ALIGN(sg_size, PAGE_SIZE);
- total_size += PAGE_SIZE;
+ total_size += RTE_ALIGN(sg_size, rte_mem_page_size());
+ total_size += rte_mem_page_size();
}
new = rte_zmalloc("ionic", struct_size, 0);
new->q.info = rte_calloc_socket("ionic",
num_descs, sizeof(void *),
- PAGE_SIZE, socket_id);
+ rte_mem_page_size(), socket_id);
if (!new->q.info) {
IONIC_PRINT(ERR, "Cannot allocate queue info");
err = -ENOMEM;
q_base = new->base;
q_base_pa = new->base_pa;
- cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
- cq_base_pa = RTE_ALIGN(q_base_pa + q_size, PAGE_SIZE);
+ cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size,
+ rte_mem_page_size());
+ cq_base_pa = RTE_ALIGN(q_base_pa + q_size,
+ rte_mem_page_size());
if (flags & IONIC_QCQ_F_SG) {
sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
- PAGE_SIZE);
- sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
+ rte_mem_page_size());
+ sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size,
+ rte_mem_page_size());
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
}
IONIC_PRINT(DEBUG, "Allocating Lif Info");
- lif->info_sz = RTE_ALIGN(sizeof(*lif->info), PAGE_SIZE);
+ lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());
lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
"lif_info", 0 /* queue_idx*/,
if (idev->port_info)
return 0;
- idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
+ idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info),
+ rte_mem_page_size());
snprintf(z_name, sizeof(z_name), "%s_port_%s_info",
IONIC_DRV_NAME, adapter->name);
#include <rte_byteorder.h>
#include <rte_io.h>
#include <rte_memory.h>
+#include <rte_eal_paging.h>
#include "ionic_logs.h"
#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12
-#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
#define __iomem
static void *hn_rndis_alloc(size_t size)
{
- return rte_zmalloc("RNDIS", size, PAGE_SIZE);
+ return rte_zmalloc("RNDIS", size, rte_mem_page_size());
}
#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
return -EINVAL;
}
- if (unlikely(reqlen > PAGE_SIZE)) {
+ if (unlikely(reqlen > rte_mem_page_size())) {
PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size",
reqlen);
return -EINVAL;
}
- sg.page = addr / PAGE_SIZE;
+ sg.page = addr / rte_mem_page_size();
sg.ofs = addr & PAGE_MASK;
sg.len = reqlen;
- if (sg.ofs + reqlen > PAGE_SIZE) {
+ if (sg.ofs + reqlen > rte_mem_page_size()) {
PMD_DRV_LOG(ERR, "RNDIS request crosses page boundary");
return -EINVAL;
}
return -ENOMEM;
comp_len = sizeof(*comp) + odlen;
- comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE);
+ comp = rte_zmalloc("QUERY", comp_len, rte_mem_page_size());
if (!comp) {
error = -ENOMEM;
goto done;
int error;
reqlen = sizeof(*req) + dlen;
- req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE);
+ req = rte_zmalloc("RNDIS_SET", reqlen, rte_mem_page_size());
if (!req)
return -ENOMEM;
unsigned int size = rte_pktmbuf_data_len(m);
unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
- slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+ slots += (offs + size + rte_mem_page_size() - 1) /
+ rte_mem_page_size();
m = m->next;
}
while (m) {
rte_iova_t addr = rte_mbuf_data_iova(m);
- unsigned int page = addr / PAGE_SIZE;
+ unsigned int page = addr / rte_mem_page_size();
unsigned int offset = addr & PAGE_MASK;
unsigned int len = rte_pktmbuf_data_len(m);
while (len > 0) {
- unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+ unsigned int bytes = RTE_MIN(len,
+ rte_mem_page_size() - offset);
sg[segs].page = page;
sg[segs].ofs = offset;
addr = txq->tx_rndis_iova +
((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
- sg[0].page = addr / PAGE_SIZE;
+ sg[0].page = addr / rte_mem_page_size();
sg[0].ofs = addr & PAGE_MASK;
sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
segs = 1;
* All rights reserved.
*/
+#include <rte_eal_paging.h>
+
/*
* Tunable ethdev params
*/
#define HN_RX_EXTMBUF_ENABLE 0
-/* Buffers need to be aligned */
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
#ifndef PAGE_MASK
-#define PAGE_MASK (PAGE_SIZE - 1)
+#define PAGE_MASK (rte_mem_page_size() - 1)
#endif
struct hn_data;
*
* Note the sizeof(struct vring_desc) is 16 bytes.
*/
-#define VIRTIO_MAX_INDIRECT ((int)(PAGE_SIZE / 16))
+#define VIRTIO_MAX_INDIRECT ((int)(rte_mem_page_size() / 16))
/*
* Maximum number of virtqueues per device.
#include <rte_errno.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>
-
#include <rte_memory.h>
+#include <rte_eal_paging.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
} else if (queue_type == VTNET_CQ) {
/* Allocate a page for control vq command, data and status */
- sz_hdr_mz = PAGE_SIZE;
+ sz_hdr_mz = rte_mem_page_size();
}
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
cvq->virtio_net_hdr_mem = hdr_mz->iova;
- memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
+ memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
hw->cvq = cvq;
}
#include "virtio.h"
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
#define VIRTIO_MAX_RX_QUEUES 128U
#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MAX_MAC_ADDRS 64
#include <linux/virtio_net.h>
#include <stdbool.h>
+#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
"IFCVF %s(): " fmt "\n", __func__, ##args)
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
#define IFCVF_USED_RING_LEN(size) \
((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
for (i = 0; i < nr_vring; i++) {
rte_vhost_get_vhost_vring(vid, i, &vq);
- size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
- PAGE_SIZE);
- vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
+ size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
+ rte_mem_page_size());
+ vring_buf = rte_zmalloc("ifcvf", size, rte_mem_page_size());
vring_init(&internal->m_vring[i], vq.size, vring_buf,
- PAGE_SIZE);
+ rte_mem_page_size());
ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
(uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
len = IFCVF_USED_RING_LEN(vq.size);
rte_vhost_log_used_vring(vid, i, 0, len);
- size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
- PAGE_SIZE);
+ size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
+ rte_mem_page_size());
rte_vfio_container_dma_unmap(internal->vfio_container_fd,
(uint64_t)(uintptr_t)internal->m_vring[i].desc,
m_vring_iova, size);