mem: rename address mapping function to IOVA
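
For context: rte_mem_virt2phy() answered "what is the physical address of
this virtual address?", but when an IOMMU sits between the NIC and memory,
the address the device must use is an I/O virtual address (IOVA), which is
not necessarily the physical one. The rename to rte_mem_virt2iova() makes
that explicit. A minimal sketch of the call pattern this patch converts to
(the helper name map_for_dma is illustrative, not from the patch):

    #include <rte_memory.h>

    /* Sketch only: resolve a driver-owned buffer to a bus address the
     * device can DMA to. With an IOMMU enabled the result is an IOVA;
     * without one it degenerates to the physical address.
     */
    static rte_iova_t map_for_dma(void *buf)
    {
            rte_mem_lock_page(buf);         /* keep the page resident */
            return rte_mem_virt2iova(buf);  /* was rte_mem_virt2phy() */
    }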
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d379850..1fdc51c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -259,7 +259,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
@@ -277,7 +277,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-                        rte_mem_virt2phy(vlan_table));
+                        rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);
@@ -318,7 +318,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
        req.fid = rte_cpu_to_le_16(fid);
 
        req.vlan_tag_mask_tbl_addr =
-               rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+               rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -360,7 +360,24 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+       const struct rte_eth_vmdq_rx_conf *conf =
+                   &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
+       uint16_t j = dst_id - 1;
+
+       /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ */
+       if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+           conf->pool_map[j].pools & (1UL << j)) {
+               RTE_LOG(DEBUG, PMD,
+                       "Add vlan %u to vmdq pool %u\n",
+                       conf->pool_map[j].vlan_id, j);
+
+               filter->l2_ivlan = conf->pool_map[j].vlan_id;
+               filter->enables |=
+                       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+                       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+       }
 
        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);
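
The new branch above keys off the application's VMDQ configuration:
dst_id - 1 is used as the pool index, and when the matching pool_map entry
targets that pool, its VLAN id is programmed into the L2 filter. A sketch
of the application-side setup this consumes, taken from a hypothetical
port-init function (pool and VLAN numbers are made up for illustration):

    #include <rte_ethdev.h>

    struct rte_eth_conf port_conf = { 0 };

    /* Sketch: steer VLAN 100 into VMDQ pool 0 and VLAN 200 into pool 1;
     * the driver picks these up when it allocates the per-pool VNIC
     * filters.
     */
    port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
    port_conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_16_POOLS;
    port_conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 2;
    port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
    port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 1ULL << 0;
    port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[1].vlan_id = 200;
    port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[1].pools = 1ULL << 1;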
@@ -384,9 +401,15 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
+       if (enables &
+           HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
+               req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
+       if (enables &
+           HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
+               req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -621,7 +644,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+                       rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
@@ -647,7 +670,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+                       rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
@@ -763,7 +786,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
-       link_info->duplex = resp->duplex;
+       link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
@@ -1730,7 +1753,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
-               rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+               rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -1918,7 +1941,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
 
-static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
+static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
 {
        uint32_t one_speed;
 
@@ -2599,7 +2622,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
-               rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map buffer address to physical memory\n");
@@ -3021,7 +3044,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3057,7 +3080,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3118,7 +3141,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3173,7 +3196,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == 0) {
                HWRM_UNLOCK();