X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev.c;h=0add73d2f304beb7170d56c7f278f49abdc9e8e8;hb=31cdde02f0be730fc1b9b12a8b53f8a04fa423fc;hp=d4751d4780439b0769a92cc99552201b3f0857d5;hpb=6dca716c9e1daa8ea770a4a198bd068e72a2e03c;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index d4751d4780..0add73d2f3 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -79,6 +79,11 @@ static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
 static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
 
+static int hns3_add_mc_addr(struct hns3_hw *hw,
+			    struct rte_ether_addr *mac_addr);
+static int hns3_remove_mc_addr(struct hns3_hw *hw,
+			       struct rte_ether_addr *mac_addr);
+
 static void
 hns3_pf_disable_irq0(struct hns3_hw *hw)
 {
@@ -232,23 +237,25 @@ hns3_interrupt_handler(void *param)
 static int
 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 {
-#define HNS3_VLAN_OFFSET_160		160
+#define HNS3_VLAN_ID_OFFSET_STEP	160
+#define HNS3_VLAN_BYTE_SIZE		8
 	struct hns3_vlan_filter_pf_cfg_cmd *req;
 	struct hns3_hw *hw = &hns->hw;
 	uint8_t vlan_offset_byte_val;
 	struct hns3_cmd_desc desc;
 	uint8_t vlan_offset_byte;
-	uint8_t vlan_offset_160;
+	uint8_t vlan_offset_base;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
 
-	vlan_offset_160 = vlan_id / HNS3_VLAN_OFFSET_160;
-	vlan_offset_byte = (vlan_id % HNS3_VLAN_OFFSET_160) / 8;
-	vlan_offset_byte_val = 1 << (vlan_id % 8);
+	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
+	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
+			   HNS3_VLAN_BYTE_SIZE;
+	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
 	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
-	req->vlan_offset = vlan_offset_160;
+	req->vlan_offset = vlan_offset_base;
 	req->vlan_cfg = on ? 0 : 1;
 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
 
@@ -310,11 +317,9 @@ hns3_restore_vlan_table(struct hns3_adapter *hns)
 	uint16_t vlan_id;
 	int ret = 0;
 
-	if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) {
-		ret = hns3_vlan_pvid_configure(hns, pf->port_base_vlan_cfg.pvid,
-					       1);
-		return ret;
-	}
+	if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
+		return hns3_vlan_pvid_configure(hns,
+						pf->port_base_vlan_cfg.pvid, 1);
 
 	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
 		if (vlan_entry->hd_tbl_status) {
@@ -871,6 +876,12 @@ hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
+	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
+		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
+			 RTE_ETHER_MAX_VLAN_ID);
+		return -EINVAL;
+	}
+
 	rte_spinlock_lock(&hw->lock);
 	ret = hns3_vlan_pvid_configure(hns, pvid, on);
 	rte_spinlock_unlock(&hw->lock);
@@ -1401,21 +1412,83 @@ hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 	return ret;
 }
 
+static int
+hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+	struct rte_ether_addr *addr;
+	int ret;
+	int i;
+
+	for (i = 0; i < hw->mc_addrs_num; i++) {
+		addr = &hw->mc_addrs[i];
+		/* Check if there are duplicate addresses */
+		if (rte_is_same_ether_addr(addr, mac_addr)) {
+			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+					      addr);
+			hns3_err(hw, "failed to add mc mac addr, same addrs"
+				 "(%s) is added by the set_mc_mac_addr_list "
+				 "API", mac_str);
+			return -EINVAL;
+		}
+	}
+
+	ret = hns3_add_mc_addr(hw, mac_addr);
+	if (ret) {
+		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+				      mac_addr);
+		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
+			 mac_str, ret);
+	}
+	return ret;
+}
+
+static int
+hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
+{
+	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+	int ret;
+
+	ret = hns3_remove_mc_addr(hw, mac_addr);
+	if (ret) {
+		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+				      mac_addr);
+		hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
+			 mac_str, ret);
+	}
+	return ret;
+}
+
 static int
 hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
-		  uint32_t idx, __attribute__ ((unused)) uint32_t pool)
+		  uint32_t idx, __rte_unused uint32_t pool)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
 	int ret;
 
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_add_uc_addr_common(hw, mac_addr);
+
+	/*
+	 * In hns3 network engine adding UC and MC mac address with different
+	 * commands with firmware. We need to determine whether the input
+	 * address is a UC or a MC address to call different commands.
+	 * By the way, it is recommended calling the API function named
+	 * rte_eth_dev_set_mc_addr_list to set the MC mac address, because
+	 * using the rte_eth_dev_mac_addr_add API function to set MC mac address
+	 * may affect the specifications of UC mac addresses.
+	 */
+	if (rte_is_multicast_ether_addr(mac_addr))
+		ret = hns3_add_mc_addr_common(hw, mac_addr);
+	else
+		ret = hns3_add_uc_addr_common(hw, mac_addr);
+
 	if (ret) {
 		rte_spinlock_unlock(&hw->lock);
 		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
-		hns3_err(hw, "Failed to add mac addr(%s): %d", mac_str, ret);
+		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
+			 ret);
 		return ret;
 	}
 
@@ -1437,7 +1510,7 @@ hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
 		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
-		hns3_err(hw, "Remove unicast mac addr err! addr(%s) invalid",
+		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
 			 mac_str);
 		return -EINVAL;
 	}
@@ -1464,16 +1537,18 @@ hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
 	int ret;
 
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_remove_uc_addr_common(hw, mac_addr);
+
+	if (rte_is_multicast_ether_addr(mac_addr))
+		ret = hns3_remove_mc_addr_common(hw, mac_addr);
+	else
+		ret = hns3_remove_uc_addr_common(hw, mac_addr);
+
+	rte_spinlock_unlock(&hw->lock);
 	if (ret) {
-		rte_spinlock_unlock(&hw->lock);
 		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
-		hns3_err(hw, "Failed to remove mac addr(%s): %d", mac_str, ret);
-		return;
+		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
+			 ret);
 	}
-
-	rte_spinlock_unlock(&hw->lock);
 }
 
 static int
@@ -1487,15 +1562,10 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev,
 	bool rm_succes = false;
 	int ret, ret_val;
 
-	/* check if mac addr is valid */
-	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
-				      mac_addr);
-		hns3_err(hw, "Failed to set mac addr, addr(%s) invalid",
-			 mac_str);
-		return -EINVAL;
-	}
-
+	/*
+	 * It has been guaranteed that input parameter named mac_addr is valid
+	 * address in the rte layer of DPDK framework.
+	 */
 	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
 	default_addr_setted = hw->mac.default_addr_setted;
 	if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
@@ -1574,19 +1644,22 @@ hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
 	for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
 		addr = &hw->data->mac_addrs[i];
-		if (!rte_is_valid_assigned_ether_addr(addr))
+		if (rte_is_zero_ether_addr(addr))
 			continue;
-		if (del)
-			ret = hns3_remove_uc_addr_common(hw, addr);
+		if (rte_is_multicast_ether_addr(addr))
+			ret = del ? hns3_remove_mc_addr(hw, addr) :
+			      hns3_add_mc_addr(hw, addr);
 		else
-			ret = hns3_add_uc_addr_common(hw, addr);
+			ret = del ? hns3_remove_uc_addr_common(hw, addr) :
+			      hns3_add_uc_addr_common(hw, addr);
+
 		if (ret) {
 			err = ret;
 			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      addr);
-			hns3_dbg(hw,
-				 "Failed to %s mac addr(%s). ret:%d i:%d",
-				 del ? "remove" : "restore", mac_str, ret, i);
+			hns3_err(hw, "failed to %s mac addr(%s) index:%d "
+				 "ret = %d.", del ? "remove" : "restore",
+				 mac_str, i, ret);
 		}
 	}
 	return err;
 }
@@ -1633,7 +1706,7 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 	if (!rte_is_multicast_ether_addr(mac_addr)) {
 		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
-		hns3_err(hw, "Failed to add mc mac addr, addr(%s) invalid",
+		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
 			 mac_str);
 		return -EINVAL;
 	}
@@ -1662,7 +1735,7 @@ hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 			hns3_err(hw, "mc mac vlan table is full");
 		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
-		hns3_err(hw, "Failed to add mc mac addr(%s): %d", mac_str, ret);
+		hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
 	}
 
 	return ret;
@@ -1727,7 +1800,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
 	uint32_t j;
 
 	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
-		hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
+		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
 			 "invalid. valid range: 0~%d",
 			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
 		return -EINVAL;
@@ -1740,7 +1813,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
 			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      addr);
 			hns3_err(hw,
-				 "Failed to set mc mac addr, addr(%s) invalid.",
+				 "failed to set mc mac addr, addr(%s) invalid.",
 				 mac_str);
 			return -EINVAL;
 		}
@@ -1751,12 +1824,30 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
 				rte_ether_format_addr(mac_str,
 						      RTE_ETHER_ADDR_FMT_SIZE,
 						      addr);
-				hns3_err(hw, "Failed to set mc mac addr, "
+				hns3_err(hw, "failed to set mc mac addr, "
 					 "addrs invalid. two same addrs(%s).",
 					 mac_str);
 				return -EINVAL;
 			}
 		}
+
+		/*
+		 * Check if there are duplicate addresses between mac_addrs
+		 * and mc_addr_set
+		 */
+		for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) {
+			if (rte_is_same_ether_addr(addr,
+						   &hw->data->mac_addrs[j])) {
+				rte_ether_format_addr(mac_str,
+						      RTE_ETHER_ADDR_FMT_SIZE,
+						      addr);
+				hns3_err(hw, "failed to set mc mac addr, "
+					 "addrs invalid. addrs(%s) has already "
+					 "configured in mac_addr add API",
+					 mac_str);
+				return -EINVAL;
+			}
+		}
 	}
 
 	return 0;
@@ -2023,34 +2114,98 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 }
 
 static int
-hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
-			   bool mmap, uint16_t queue_id)
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+			   enum hns3_ring_type queue_type, uint16_t queue_id)
 {
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_cmd_desc desc;
 	struct hns3_ctrl_vector_chain_cmd *req =
 		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
 	enum hns3_cmd_status status;
 	enum hns3_opcode_type op;
 	uint16_t tqp_type_and_id = 0;
+	const char *op_str;
+	uint16_t type;
+	uint16_t gl;
 
 	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
 	hns3_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
+	if (queue_type == HNS3_RING_TYPE_RX)
+		gl = HNS3_RING_GL_RX;
+	else
+		gl = HNS3_RING_GL_TX;
+
+	type = queue_type;
+
 	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
-		       HNS3_RING_TYPE_RX);
+		       type);
 	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
 	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
-		       HNS3_RING_GL_RX);
+		       gl);
 	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
-	req->int_cause_num = 1;
+	op_str = mmap ? "Map" : "Unmap";
 	status = hns3_cmd_send(hw, &desc, 1);
 	if (status) {
-		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
-			 queue_id, vector_id, status);
-		return -EIO;
+		hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+			 op_str, queue_id, req->int_vector_id, status);
+		return status;
+	}
+
+	return 0;
+}
+
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+	uint8_t vec;
+	int ret;
+	int i;
+
+	/*
+	 * In hns3 network engine, vector 0 is always the misc interrupt of this
+	 * function, vector 1~N can be used respectively for the queues of the
+	 * function. Tx and Rx queues with the same number share the interrupt
+	 * vector. In the initialization clearing the all hardware mapping
+	 * relationship configurations between queues and interrupt vectors is
+	 * needed, so some error caused by the residual configurations, such as
+	 * the unexpected Tx interrupt, can be avoid. Because of the hardware
+	 * constraints in hns3 hardware engine, we have to implement clearing
+	 * the mapping relationship configurations by binding all queues to the
+	 * last interrupt vector and reserving the last interrupt vector. This
+	 * method results in a decrease of the maximum queues when upper
+	 * applications call the rte_eth_dev_configure API function to enable
+	 * Rx interrupt.
+	 */
+	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+	for (i = 0; i < hw->intr_tqps_num; i++) {
+		/*
+		 * Set gap limiter and rate limiter configuration of queue's
+		 * interrupt.
		 */
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+				       HNS3_TQP_INTR_GL_DEFAULT);
+		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_TX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
+
+		ret = hns3_bind_ring_with_vector(hw, vec, false,
+						 HNS3_RING_TYPE_RX, i);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+					  "vector: %d, ret=%d", i, vec, ret);
+			return ret;
+		}
 	}
 
 	return 0;
 }
@@ -2171,12 +2326,10 @@ hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
 	}
 
 	ret = hns3_buffer_alloc(hw);
-	if (ret) {
+	if (ret)
 		hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
-		return ret;
-	}
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -2227,8 +2380,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	uint16_t queue_num = hw->tqps_num;
 
-	info->max_rx_queues = hw->tqps_num;
+	/*
+	 * In interrupt mode, 'max_rx_queues' is set based on the number of
+	 * MSI-X interrupt resources of the hardware.
+	 */
+	if (hw->data->dev_conf.intr_conf.rxq == 1)
+		queue_num = hw->intr_tqps_num;
+
+	info->max_rx_queues = queue_num;
 	info->max_tx_queues = hw->tqps_num;
 	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
 	info->min_rx_bufsize = hw->rx_buf_len;
@@ -2397,6 +2558,7 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	struct hns3_pf *pf = &hns->pf;
 	struct hns3_pf_res_cmd *req;
 	struct hns3_cmd_desc desc;
+	uint16_t num_msi;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
@@ -2428,9 +2590,9 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
 
-	hw->num_msi =
-		hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
-			       HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+	num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+	hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
 
 	return 0;
 }
@@ -2587,6 +2749,7 @@ hns3_get_board_configuration(struct hns3_hw *hw)
 
 	hw->mac.media_type = cfg.media_type;
 	hw->rss_size_max = cfg.rss_size_max;
+	hw->rss_dis_flag = false;
 	hw->rx_buf_len = cfg.rx_buf_len;
 	memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
 	hw->mac.phy_addr = cfg.phy_addr;
@@ -2648,12 +2811,10 @@ hns3_get_configuration(struct hns3_hw *hw)
 	}
 
 	ret = hns3_get_board_configuration(hw);
-	if (ret) {
+	if (ret)
 		PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
-		return ret;
-	}
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -3470,6 +3631,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
 			"add mac ethertype failed for undefined, code=%d.",
 			resp_code);
 		return_status = -EIO;
+		break;
 	}
 
 	return return_status;
@@ -3576,7 +3738,7 @@ hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
 
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret)
-		PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
+		PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
 
 	return ret;
 }
@@ -3587,7 +3749,6 @@ hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
 	struct hns3_promisc_param param;
 	bool en_bc_pmc = true;
 	uint8_t vf_id;
-	int ret;
 
 	/*
 	 * In current version VF is not supported when PF is driven by DPDK
@@ -3597,11 +3758,7 @@ hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
 	vf_id = 0;
 
 	hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
-	ret = hns3_cmd_set_promisc_mode(hw, &param);
-	if (ret)
-		return ret;
-
-	return 0;
+	return hns3_cmd_set_promisc_mode(hw, &param);
 }
 
 static int
@@ -3629,14 +3786,14 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
 	int ret;
 
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
+	ret = hns3_set_promisc_mode(hw, true, true);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
+		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3644,17 +3801,18 @@ hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static int
 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
+	bool allmulti = dev->data->all_multicast ? true : false;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
 	int ret;
 
 	/* If now in all_multicast mode, must remain in all_multicast mode. */
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
+	ret = hns3_set_promisc_mode(hw, false, allmulti);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
+		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3664,14 +3822,17 @@ hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
 	int ret;
 
+	if (dev->data->promiscuous)
+		return 0;
+
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
+	ret = hns3_set_promisc_mode(hw, false, true);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
+		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3681,18 +3842,18 @@ hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
 	int ret;
 
 	/* If now in promiscuous mode, must remain in all_multicast mode. */
-	if (dev->data->promiscuous == 1)
+	if (dev->data->promiscuous)
 		return 0;
 
 	rte_spinlock_lock(&hw->lock);
-	ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
+	ret = hns3_set_promisc_mode(hw, false, false);
 	rte_spinlock_unlock(&hw->lock);
 	if (ret)
-		hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
+		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
+			 ret);
 
 	return ret;
 }
@@ -3701,13 +3862,12 @@
 static int
 hns3_dev_promisc_restore(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	bool en_mc_pmc;
-	bool en_uc_pmc;
+	bool allmulti = hw->data->all_multicast ? true : false;
 
-	en_uc_pmc = (hw->data->promiscuous == 1) ? true : false;
-	en_mc_pmc = (hw->data->all_multicast == 1) ? true : false;
+	if (hw->data->promiscuous)
+		return hns3_set_promisc_mode(hw, true, true);
 
-	return hns3_set_promisc_mode(hw, en_uc_pmc, en_mc_pmc);
+	return hns3_set_promisc_mode(hw, false, allmulti);
 }
 
 static int
@@ -3947,6 +4107,19 @@ hns3_init_hardware(struct hns3_adapter *hns)
 		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
 		goto err_mac_init;
 	}
+
+	/*
+	 * In the initialization clearing the all hardware mapping relationship
+	 * configurations between queues and interrupt vectors is needed, so
+	 * some error caused by the residual configurations, such as the
+	 * unexpected interrupt, can be avoid.
+	 */
+	ret = hns3_init_ring_with_vector(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
+		goto err_mac_init;
+	}
+
 	return 0;
 
 err_mac_init:
@@ -4037,13 +4210,10 @@ err_get_config:
 	rte_intr_disable(&pci_dev->intr_handle);
 	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
 			     eth_dev);
-
 err_intr_callback_register:
-	hns3_cmd_uninit(hw);
-
 err_cmd_init:
+	hns3_cmd_uninit(hw);
 	hns3_cmd_destroy_queue(hw);
-
 err_cmd_init_queue:
 	hw->io_base = NULL;
 
@@ -4109,9 +4279,9 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
+	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
 	uint32_t intr_vector;
-	uint8_t base = 0;
-	uint8_t vec = 0;
 	uint16_t q_id;
 	int ret;
 
@@ -4147,7 +4317,9 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			ret = hns3_bind_ring_with_vector(hw, vec, true,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (ret)
 				goto bind_vector_error;
 			intr_handle->intr_vec[q_id] = vec;
@@ -4170,6 +4342,37 @@ alloc_intr_vec_error:
 	return ret;
 }
 
+static int
+hns3_restore_rx_interrupt(struct hns3_hw *hw)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint16_t q_id;
+	int ret;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+			ret = hns3_bind_ring_with_vector(hw,
+					intr_handle->intr_vec[q_id], true,
+					HNS3_RING_TYPE_RX, q_id);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+hns3_restore_filter(struct rte_eth_dev *dev)
+{
+	hns3_restore_rss_filter(dev);
+}
+
 static int
 hns3_dev_start(struct rte_eth_dev *dev)
 {
@@ -4190,17 +4393,30 @@ hns3_dev_start(struct rte_eth_dev *dev)
 		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
+	ret = hns3_map_rx_interrupt(dev);
+	if (ret) {
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
+		return ret;
+	}
 
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	ret = hns3_map_rx_interrupt(dev);
-	if (ret)
-		return ret;
+
 	hns3_set_rxtx_function(dev);
 	hns3_mp_req_start_rxtx(dev);
 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
 
+	hns3_restore_filter(dev);
+
+	/* Enable interrupt of all rx queues before enabling queues */
+	hns3_dev_all_rx_queue_intr_enable(hw, true);
+	/*
+	 * When finished the initialization, enable queues to receive/transmit
+	 * packets.
+	 */
+	hns3_enable_all_queues(hw, true);
+
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
 }
@@ -4233,8 +4449,8 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	uint8_t base = 0;
-	uint8_t vec = 0;
+	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
+	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
 	uint16_t q_id;
 
 	if (dev->data->dev_conf.intr_conf.rxq == 0)
@@ -4247,7 +4463,9 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 	}
 	if (rte_intr_dp_is_en(intr_handle)) {
 		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			(void)hns3_bind_ring_with_vector(hw, vec, false,
+							 HNS3_RING_TYPE_RX,
+							 q_id);
 			if (vec < base + intr_handle->nb_efd - 1)
 				vec++;
 		}
@@ -4279,12 +4497,12 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
 		hns3_do_stop(hns);
+		hns3_unmap_rx_interrupt(dev);
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
 	rte_eal_alarm_cancel(hns3_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
-	hns3_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -4517,31 +4735,24 @@ hns3_reinit_dev(struct hns3_adapter *hns)
 	ret = hns3_reset_all_queues(hns);
 	if (ret) {
 		hns3_err(hw, "Failed to reset all queues: %d", ret);
-		goto err_init;
+		return ret;
 	}
 
 	ret = hns3_init_hardware(hns);
 	if (ret) {
 		hns3_err(hw, "Failed to init hardware: %d", ret);
-		goto err_init;
+		return ret;
 	}
 
 	ret = hns3_enable_hw_error_intr(hns, true);
 	if (ret) {
 		hns3_err(hw, "fail to enable hw error interrupts: %d", ret);
-		goto err_mac_init;
+		return ret;
 	}
 
 	hns3_info(hw, "Reset done, driver initialization finished.");
 
 	return 0;
-
-err_mac_init:
-	hns3_uninit_umv_space(hw);
-err_init:
-	hns3_cmd_uninit(hw);
-
-	return ret;
 }
 
 static bool
@@ -4805,9 +5016,18 @@ hns3_start_service(struct hns3_adapter *hns)
 	eth_dev = &rte_eth_devices[hw->data->port_id];
 	hns3_set_rxtx_function(eth_dev);
 	hns3_mp_req_start_rxtx(eth_dev);
-	if (hw->adapter_state == HNS3_NIC_STARTED)
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		hns3_service_handler(eth_dev);
 
+		/* Enable interrupt of all rx queues before enabling queues */
+		hns3_dev_all_rx_queue_intr_enable(hw, true);
+		/*
+		 * When finished the initialization, enable queues to receive
+		 * and transmit packets.
+		 */
+		hns3_enable_all_queues(hw, true);
+	}
+
 	return 0;
 }
 
@@ -4841,6 +5061,10 @@ hns3_restore_conf(struct hns3_adapter *hns)
 	if (ret)
 		goto err_promisc;
 
+	ret = hns3_restore_rx_interrupt(hw);
+	if (ret)
+		goto err_promisc;
+
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
 		ret = hns3_do_start(hns, false);
 		if (ret)