memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "physical address same as virtual\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map addr to phys memory\n");
- return -ENOMEM;
- }
- }
- rte_mem_lock_page(((char *)mz->addr));
rmem->pg_tbl = mz->addr;
rmem->pg_tbl_map = mz_phys_addr;
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
- rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map addr to phys memory\n");
- return -ENOMEM;
- }
- }
for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
- rte_mem_lock_page(((char *)mz->addr) + sz);
rmem->pg_arr[i] = ((char *)mz->addr) + sz;
rmem->dma_arr[i] = mz_phys_addr + sz;
}
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG,
- "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "Can't map address to physical memory\n");
- return -ENOMEM;
- }
- }
bp->rx_mem_zone = (const void *)mz;
bp->hw_rx_port_stats = mz->addr;
}
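The hunks above drop the old "virtual address equals physical address" fallback and take the DMA address straight from the memzone's own mz->iova field. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the function name example_alloc_dma_zone, the zone name "example_mz", and the RTE_MEMZONE_IOVA_CONTIG flag are illustrative choices, not taken from this driver.

#include <errno.h>
#include <string.h>
#include <rte_memzone.h>
#include <rte_memory.h>

/* Reserve a DMA-capable memzone and use its bus address directly. */
static int example_alloc_dma_zone(void)
{
	const struct rte_memzone *mz;
	rte_iova_t iova;

	mz = rte_memzone_reserve("example_mz", 4096, SOCKET_ID_ANY,
				 RTE_MEMZONE_IOVA_CONTIG);
	if (mz == NULL)
		return -ENOMEM;

	memset(mz->addr, 0, mz->len);
	iova = mz->iova;	/* no rte_mem_virt2iova() fallback needed */
	if (iova == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}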
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "Can't map address to physical memory\n");
- return -ENOMEM;
- }
- }
bp->tx_mem_zone = (const void *)mz;
bp->hw_tx_port_stats = mz->addr;
if (vlan_table) {
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
- req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
- rte_mem_virt2iova(vlan_table));
+ req.vlan_tag_tbl_addr =
+ rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
req.mask = rte_cpu_to_le_32(mask);
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
rc = -ENOMEM;
goto error;
}
- rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
}
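The hunks above and below switch buffers obtained from rte_malloc() over to rte_malloc_virt2iova() and drop the rte_mem_lock_page() calls, since memory from the DPDK heap is already IOVA-addressable. A small sketch of that allocation pattern follows; the names example_alloc_dma_buf and "example_buf" are hypothetical and only illustrate the API usage, not this driver's code.

#include <stddef.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Allocate a DMA-able buffer from the DPDK heap and look up its IOVA. */
static void *example_alloc_dma_buf(size_t len, rte_iova_t *iova)
{
	void *buf;

	buf = rte_malloc("example_buf", len, 0);
	if (buf == NULL)
		return NULL;

	/* rte_malloc_virt2iova() is the lookup that matches rte_malloc() memory */
	*iova = rte_malloc_virt2iova(buf);
	if (*iova == RTE_BAD_IOVA) {
		rte_free(buf);
		return NULL;
	}
	return buf;
}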
- rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+ rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
rte_free(bp->hwrm_short_cmd_req_addr);
PMD_DRV_LOG(ERR,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
- rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
- rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
buflen = dir_entries * entry_length;
buf = rte_malloc("nvm_dir", buflen, 0);
- rte_mem_lock_page(buf);
if (buf == NULL)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
buf = rte_malloc("nvm_item", length, 0);
- rte_mem_lock_page(buf);
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
uint8_t *buf;
buf = rte_malloc("nvm_write", data_len, 0);
- rte_mem_lock_page(buf);
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
HWRM_UNLOCK();
uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
- rte_iova_t mz_phys_addr_base;
rte_iova_t mz_phys_addr;
- int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr_base = mz->iova;
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr_base) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- for (sz = 0; sz < total_alloc_len; sz += getpagesize())
- rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map ring address to physical memory\n");
- return -ENOMEM;
- }
- }
if (tx_ring_info) {
txq->mz = mz;