X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_hwrm.c;h=06737b1e1a206d46705f67e71bc43678df49f95d;hb=46667f9377ffcb48af3c6d8998dd4bc6df8436f5;hp=0e96d3c4d7edb09374fdd2efb6cc9d49f7584c85;hpb=3320acbf4b45943b4201873e7f6df206fecd2d86;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index 0e96d3c4d7..06737b1e1a 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -277,7 +277,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; req.vlan_tag_tbl_addr = rte_cpu_to_le_64( - rte_mem_virt2phy(vlan_table)); + rte_mem_virt2iova(vlan_table)); req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count); } req.mask = rte_cpu_to_le_32(mask); @@ -318,7 +318,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, req.fid = rte_cpu_to_le_16(fid); req.vlan_tag_mask_tbl_addr = - rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table)); + rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table)); req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -367,7 +367,8 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, uint16_t j = dst_id - 1; //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ - if (conf->pool_map[j].pools & (1UL << j)) { + if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) && + conf->pool_map[j].pools & (1UL << j)) { RTE_LOG(DEBUG, PMD, "Add vlan %u to vmdq pool %u\n", conf->pool_map[j].vlan_id, j); @@ -426,12 +427,95 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, return rc; } +int bnxt_hwrm_ptp_cfg(struct bnxt *bp) +{ + struct hwrm_port_mac_cfg_input req = {.req_type = 0}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t flags = 0; + int rc; + + if (!ptp) + return 0; + + HWRM_PREP(req, PORT_MAC_CFG); + + if (ptp->rx_filter) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + if (ptp->tx_tstamp_en) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + req.flags = rte_cpu_to_le_32(flags); + req.enables = + rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_UNLOCK(); + + return rc; +} + +static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0}; + struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + +/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */ + if (ptp) + return 0; + + HWRM_PREP(req, PORT_MAC_PTP_QCFG); + + req.port_id = rte_cpu_to_le_16(bp->pf.port_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + + if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) + return 0; + + ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0); + if (!ptp) + return -ENOMEM; + + ptp->rx_regs[BNXT_PTP_RX_TS_L] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_lower); + ptp->rx_regs[BNXT_PTP_RX_TS_H] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_upper); + ptp->rx_regs[BNXT_PTP_RX_SEQ] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id); + ptp->rx_regs[BNXT_PTP_RX_FIFO] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo); + ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] = + 
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+ ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+ ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+ ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+ ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+
+ ptp->bp = bp;
+ bp->ptp_cfg = ptp;
+
+ return 0;
+}
+
 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 int rc = 0;
 struct hwrm_func_qcaps_input req = {.req_type = 0 };
 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 uint16_t new_max_vfs;
+ uint32_t flags;
 int i;

 HWRM_PREP(req, FUNC_QCAPS);
@@ -443,6 +527,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 HWRM_CHECK_RESULT();

 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ flags = rte_le_to_cpu_32(resp->flags);
 if (BNXT_PF(bp)) {
 bp->pf.port_id = resp->port_id;
 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
@@ -499,8 +584,16 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 bp->max_vnics = 1;
 }
 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
+ RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
+ HWRM_UNLOCK();
+ bnxt_hwrm_ptp_qcfg(bp);
+ }
+ }
+
 HWRM_UNLOCK();

 return rc;
@@ -549,7 +642,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 }
 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
- memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

@@ -643,7 +736,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 }
 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
 if (bp->hwrm_cmd_resp_dma_addr == 0) {
 RTE_LOG(ERR, PMD,
 "Unable to map response buffer to physical memory.\n");
@@ -669,7 +762,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 }
 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
 bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
 rte_free(bp->hwrm_short_cmd_req_addr);
 RTE_LOG(ERR, PMD,
@@ -714,34 +807,38 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 struct hwrm_port_phy_cfg_input req = {0};
 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 uint32_t enables = 0;
- uint32_t link_speed_mask =
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

 HWRM_PREP(req, PORT_PHY_CFG);

 if (conf->link_up) {
+ /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
+ if (bp->link_info.auto_mode && conf->link_speed) {
+ req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+ RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ }
+
 req.flags = rte_cpu_to_le_32(conf->phy_flags);
 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
 /*
 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
 * any auto mode, even "none".
 */
 if (!conf->link_speed) {
- req.auto_mode = conf->auto_mode;
- enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
- if (conf->auto_mode ==
- HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
- req.auto_link_speed_mask =
- conf->auto_link_speed_mask;
- enables |= link_speed_mask;
- }
- if (bp->link_info.auto_link_speed) {
- req.auto_link_speed =
- bp->link_info.auto_link_speed;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
- }
+ /* No speeds specified. Enable AutoNeg - all speeds */
+ req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
+ }
+ /* AutoNeg - Advertise speeds specified. */
+ if (conf->auto_link_speed_mask) {
+ req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 }
+
 req.auto_duplex = conf->duplex;
 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
 req.auto_pause = conf->auto_pause;
@@ -785,11 +882,13 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 (link_info->phy_link_status ==
 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
- link_info->duplex = resp->duplex;
+ link_info->duplex = resp->duplex_cfg;
 link_info->pause = resp->pause;
 link_info->auto_pause = resp->auto_pause;
 link_info->force_pause = resp->force_pause;
 link_info->auto_mode = resp->auto_mode;
+ link_info->phy_type = resp->phy_type;
+ link_info->media_type = resp->media_type;

 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
@@ -1039,7 +1138,6 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

 HWRM_UNLOCK();
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

 return rc;
 }
@@ -1562,19 +1660,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)

 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

- if (i >= bp->rx_cp_nr_rings)
+ if (i >= bp->rx_cp_nr_rings) {
 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
- else
+ } else {
 cpr = bp->rx_queues[i]->cp_ring;
+ bp->grp_info[i].fw_stats_ctx = -1;
+ }
 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- /*
- * TODO. Need a better way to reset grp_info.stats_ctx
- * for Rx rings only. stats_ctx is not saved for Tx
- * in grp_info.
- */
- bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
 if (rc)
 return rc;
 }
@@ -1634,7 +1728,6 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 bnxt_hwrm_ring_free(bp, cp_ring,
 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
 sizeof(*cpr->cp_desc_ring));
 cpr->cp_raw_cons = 0;
@@ -1752,7 +1845,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 if (bp->hwrm_cmd_resp_addr == NULL)
 return -ENOMEM;
 bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
 if (bp->hwrm_cmd_resp_dma_addr == 0) {
 RTE_LOG(ERR, PMD,
 "unable to map response address to physical memory\n");
@@ -1885,6 +1978,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 return hw_link_duplex;
 }

+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+ return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 {
 uint16_t eth_link_speed = 0;
@@ -2093,9 +2191,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 int rc = 0;
 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 struct bnxt_link_info link_req;
- uint16_t speed;
+ uint16_t speed, autoneg;

- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
 return 0;

 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
@@ -2108,20 +2206,28 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 if (!link_up)
 goto port_phy_cfg;

+ autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- if (speed == 0) {
+ if (autoneg == 1) {
 link_req.phy_flags |=
 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
- link_req.auto_mode =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
 link_req.auto_link_speed_mask =
 bnxt_parse_eth_link_speed_mask(bp,
 dev_conf->link_speeds);
 } else {
+ if (bp->link_info.phy_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+ bp->link_info.phy_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+ bp->link_info.media_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+ RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ return -EINVAL;
+ }
+
 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
 link_req.link_speed = speed;
- RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
 }
 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
 link_req.auto_pause = bp->link_info.auto_pause;
@@ -2143,6 +2249,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 {
 struct hwrm_func_qcfg_input req = {0};
 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t flags;
 int rc = 0;

 HWRM_PREP(req, FUNC_QCFG);
@@ -2154,6 +2261,9 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)

 /* Hard Coded.. 0xfff VLAN ID mask */
 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+ flags = rte_le_to_cpu_16(resp->flags);
+ if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+ bp->flags |= BNXT_FLAG_MULTI_HOST;

 switch (resp->port_partition_type) {
 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -2621,7 +2731,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 req.req_buf_page_addr[0] =
- rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
 if (req.req_buf_page_addr[0] == 0) {
 RTE_LOG(ERR, PMD,
 "unable to map buffer address to physical memory\n");
@@ -3025,7 +3135,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 uint32_t entry_length;
 uint8_t *buf;
 size_t buflen;
- phys_addr_t dma_handle;
+ rte_iova_t dma_handle;
 struct hwrm_nvm_get_dir_entries_input req = {0};
 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3043,7 +3153,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 rte_mem_lock_page(buf);
 if (buf == NULL)
 return -ENOMEM;
- dma_handle = rte_mem_virt2phy(buf);
+ dma_handle = rte_mem_virt2iova(buf);
 if (dma_handle == 0) {
 RTE_LOG(ERR, PMD,
 "unable to map response address to physical memory\n");
@@ -3070,7 +3180,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
 {
 int rc;
 uint8_t *buf;
- phys_addr_t dma_handle;
+ rte_iova_t dma_handle;
 struct hwrm_nvm_read_input req = {0};
 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3079,7 +3189,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
 if (!buf)
 return -ENOMEM;

- dma_handle = rte_mem_virt2phy(buf);
+ dma_handle = rte_mem_virt2iova(buf);
 if (dma_handle == 0) {
 RTE_LOG(ERR, PMD,
 "unable to map response address to physical memory\n");
@@ -3124,7 +3234,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 int rc;
 struct hwrm_nvm_write_input req = {0};
 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
- phys_addr_t dma_handle;
+ rte_iova_t dma_handle;
 uint8_t *buf;

 HWRM_PREP(req, NVM_WRITE);
@@ -3140,7 +3250,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 if (!buf)
 return -ENOMEM;

- dma_handle = rte_mem_virt2phy(buf);
+ dma_handle = rte_mem_virt2iova(buf);
 if (dma_handle == 0) {
 RTE_LOG(ERR, PMD,
 "unable to map response address to physical memory\n");
@@ -3195,7 +3305,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

 if (req.vnic_id_tbl_addr == 0) {
 HWRM_UNLOCK();
@@ -3555,7 +3665,6 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
 HWRM_UNLOCK();

 filter->fw_ntuple_filter_id = -1;
- filter->fw_l2_filter_id = -1;

 return 0;
 }
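
All of the rte_mem_virt2phy() -> rte_mem_virt2iova() hunks above instantiate one pattern: resolve a process-virtual buffer to an IOVA the NIC can DMA to, then hand that address to firmware in little-endian form. Below is a minimal sketch of that pattern; the helper name example_map_for_hwrm and its error handling are illustrative assumptions, not bnxt code.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Illustrative only: allocate a buffer, translate it to an IOVA, and
 * produce the little-endian address an HWRM field such as
 * req.vlan_tag_tbl_addr expects. */
static int example_map_for_hwrm(size_t len, uint64_t *le_addr, void **virt)
{
	void *buf = rte_malloc("example_hwrm_buf", len, 0);
	rte_iova_t iova;

	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);		/* fault the page in before translating */
	iova = rte_mem_virt2iova(buf);
	if (iova == RTE_BAD_IOVA) {	/* the driver itself compares against 0 */
		rte_free(buf);
		return -ENOMEM;
	}
	*le_addr = rte_cpu_to_le_64(iova);
	*virt = buf;
	return 0;
}

rte_mem_virt2iova() arrived in DPDK 17.11 as the IOVA-aware successor to rte_mem_virt2phy(): under IOVA-as-VA it simply returns the virtual address, which is why the rename is mechanical throughout this file.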