X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev_vf.c;h=675db44e854cb87cb1cf8758fb1818450a8ad326;hb=d6e5056ab38cfe0c27d7a7a631cdf259a7880111;hp=5770c4786c5754d6394965defa80c1dee3670cec;hpb=7e2e162ed04401eeb493c5655da9036842466e46;p=dpdk.git diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 5770c4786c..675db44e85 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -156,9 +156,12 @@ hns3vf_enable_msix(const struct rte_pci_device *device, bool op) if (ret < 0) { PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x", (pos + PCI_MSIX_FLAGS)); + return -ENXIO; } + return 0; } + return -ENXIO; } @@ -201,98 +204,6 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) return ret; } -static int -hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) -{ - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - struct rte_ether_addr *addr; - int ret; - int i; - - for (i = 0; i < hw->mc_addrs_num; i++) { - addr = &hw->mc_addrs[i]; - /* Check if there are duplicate addresses */ - if (rte_is_same_ether_addr(addr, mac_addr)) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - addr); - hns3_err(hw, "failed to add mc mac addr, same addrs" - "(%s) is added by the set_mc_mac_addr_list " - "API", mac_str); - return -EINVAL; - } - } - - ret = hns3vf_add_mc_mac_addr(hw, mac_addr); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); - hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", - mac_str, ret); - } - return ret; -} - -static int -hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, - __rte_unused uint32_t idx, - __rte_unused uint32_t pool) -{ - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - int ret; - - rte_spinlock_lock(&hw->lock); - - /* - * In hns3 network engine adding UC and MC mac address with different - * commands with firmware. We need to determine whether the input - * address is a UC or a MC address to call different commands. - * By the way, it is recommended calling the API function named - * rte_eth_dev_set_mc_addr_list to set the MC mac address, because - * using the rte_eth_dev_mac_addr_add API function to set MC mac address - * may affect the specifications of UC mac addresses. 
- */ - if (rte_is_multicast_ether_addr(mac_addr)) - ret = hns3vf_add_mc_addr_common(hw, mac_addr); - else - ret = hns3vf_add_uc_mac_addr(hw, mac_addr); - - rte_spinlock_unlock(&hw->lock); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); - hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, - ret); - } - - return ret; -} - -static void -hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) -{ - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* index will be checked by upper level rte interface */ - struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - int ret; - - rte_spinlock_lock(&hw->lock); - - if (rte_is_multicast_ether_addr(mac_addr)) - ret = hns3vf_remove_mc_mac_addr(hw, mac_addr); - else - ret = hns3vf_remove_uc_mac_addr(hw, mac_addr); - - rte_spinlock_unlock(&hw->lock); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); - hns3_err(hw, "failed to remove mac addr(%s), ret = %d", - mac_str, ret); - } -} - static int hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) @@ -346,39 +257,6 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, return ret; } -static int -hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del) -{ - struct hns3_hw *hw = &hns->hw; - struct rte_ether_addr *addr; - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - int err = 0; - int ret; - int i; - - for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) { - addr = &hw->data->mac_addrs[i]; - if (rte_is_zero_ether_addr(addr)) - continue; - if (rte_is_multicast_ether_addr(addr)) - ret = del ? hns3vf_remove_mc_mac_addr(hw, addr) : - hns3vf_add_mc_mac_addr(hw, addr); - else - ret = del ? hns3vf_remove_uc_mac_addr(hw, addr) : - hns3vf_add_uc_mac_addr(hw, addr); - - if (ret) { - err = ret; - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - addr); - hns3_err(hw, "failed to %s mac addr(%s) index:%d " - "ret = %d.", del ? "remove" : "restore", - mac_str, i, ret); - } - } - return err; -} - static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) @@ -421,147 +299,6 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, return ret; } -static int -hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, - struct rte_ether_addr *mc_addr_set, - uint32_t nb_mc_addr) -{ - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - struct rte_ether_addr *addr; - uint32_t i; - uint32_t j; - - if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { - hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " - "invalid. valid range: 0~%d", - nb_mc_addr, HNS3_MC_MACADDR_NUM); - return -EINVAL; - } - - /* Check if input mac addresses are valid */ - for (i = 0; i < nb_mc_addr; i++) { - addr = &mc_addr_set[i]; - if (!rte_is_multicast_ether_addr(addr)) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - addr); - hns3_err(hw, - "failed to set mc mac addr, addr(%s) invalid.", - mac_str); - return -EINVAL; - } - - /* Check if there are duplicate addresses */ - for (j = i + 1; j < nb_mc_addr; j++) { - if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { - hns3_ether_format_addr(mac_str, - RTE_ETHER_ADDR_FMT_SIZE, - addr); - hns3_err(hw, "failed to set mc mac addr, " - "addrs invalid. 
two same addrs(%s).", - mac_str); - return -EINVAL; - } - } - - /* - * Check if there are duplicate addresses between mac_addrs - * and mc_addr_set - */ - for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) { - if (rte_is_same_ether_addr(addr, - &hw->data->mac_addrs[j])) { - hns3_ether_format_addr(mac_str, - RTE_ETHER_ADDR_FMT_SIZE, - addr); - hns3_err(hw, "failed to set mc mac addr, " - "addrs invalid. addrs(%s) has already " - "configured in mac_addr add API", - mac_str); - return -EINVAL; - } - } - } - - return 0; -} - -static int -hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, - struct rte_ether_addr *mc_addr_set, - uint32_t nb_mc_addr) -{ - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_ether_addr *addr; - int cur_addr_num; - int set_addr_num; - int num; - int ret; - int i; - - ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); - if (ret) - return ret; - - rte_spinlock_lock(&hw->lock); - cur_addr_num = hw->mc_addrs_num; - for (i = 0; i < cur_addr_num; i++) { - num = cur_addr_num - i - 1; - addr = &hw->mc_addrs[num]; - ret = hns3vf_remove_mc_mac_addr(hw, addr); - if (ret) { - rte_spinlock_unlock(&hw->lock); - return ret; - } - - hw->mc_addrs_num--; - } - - set_addr_num = (int)nb_mc_addr; - for (i = 0; i < set_addr_num; i++) { - addr = &mc_addr_set[i]; - ret = hns3vf_add_mc_mac_addr(hw, addr); - if (ret) { - rte_spinlock_unlock(&hw->lock); - return ret; - } - - rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); - hw->mc_addrs_num++; - } - rte_spinlock_unlock(&hw->lock); - - return 0; -} - -static int -hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) -{ - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - struct hns3_hw *hw = &hns->hw; - struct rte_ether_addr *addr; - int err = 0; - int ret; - int i; - - for (i = 0; i < hw->mc_addrs_num; i++) { - addr = &hw->mc_addrs[i]; - if (!rte_is_multicast_ether_addr(addr)) - continue; - if (del) - ret = hns3vf_remove_mc_mac_addr(hw, addr); - else - ret = hns3vf_add_mc_mac_addr(hw, addr); - if (ret) { - err = ret; - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - addr); - hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d", - del ? "Remove" : "Restore", mac_str, ret); - } - } - return err; -} - static int hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, bool en_uc_pmc, bool en_mc_pmc) @@ -781,8 +518,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; struct rte_eth_rss_conf rss_conf; - uint32_t max_rx_pkt_len; - uint16_t mtu; bool gro_en; int ret; @@ -798,25 +533,23 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) * work as usual. But these fake queues are imperceptible, and can not * be used by upper applications. 
*/ - if (!hns3_dev_indep_txrx_supported(hw)) { - ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); - if (ret) { - hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", - ret); - return ret; - } + ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); + if (ret) { + hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret); + hw->cfg_max_queues = 0; + return ret; } hw->adapter_state = HNS3_NIC_CONFIGURING; - if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { + if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) { hns3_err(hw, "setting link speed/duplex not supported"); ret = -EINVAL; goto cfg_err; } /* When RSS is not configured, redirect the packet queue 0 */ - if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { - conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { + conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; hw->rss_dis_flag = false; rss_conf = conf->rx_adv_conf.rss_conf; ret = hns3_dev_rss_hash_update(dev, &rss_conf); @@ -824,35 +557,16 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) goto cfg_err; } - /* - * If jumbo frames are enabled, MTU needs to be refreshed - * according to the maximum RX packet length. - */ - if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; - if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || - max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { - hns3_err(hw, "maximum Rx packet length must be greater " - "than %u and less than %u when jumbo frame enabled.", - (uint16_t)HNS3_DEFAULT_FRAME_LEN, - (uint16_t)HNS3_MAX_FRAME_LEN); - ret = -EINVAL; - goto cfg_err; - } - - mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); - ret = hns3vf_dev_mtu_set(dev, mtu); - if (ret) - goto cfg_err; - dev->data->mtu = mtu; - } + ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu); + if (ret != 0) + goto cfg_err; ret = hns3vf_dev_configure_vlan(dev); if (ret) goto cfg_err; /* config hardware GRO */ - gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false; + gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? 
true : false; ret = hns3_config_gro(hw, gro_en); if (ret) goto cfg_err; @@ -863,6 +577,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) return 0; cfg_err: + hw->cfg_max_queues = 0; (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); hw->adapter_state = HNS3_NIC_INITIALIZED; @@ -927,13 +642,6 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) rte_spinlock_unlock(&hw->lock); return ret; } - if (mtu > RTE_ETHER_MTU) - dev->data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_JUMBO_FRAME; - else - dev->data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_JUMBO_FRAME; - dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; rte_spinlock_unlock(&hw->lock); return 0; @@ -961,35 +669,34 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE; - info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_SCTP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | - DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_RSS_HASH | - DEV_RX_OFFLOAD_TCP_LRO); - info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_MULTI_SEGS | - DEV_TX_OFFLOAD_TCP_TSO | - DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO | - DEV_TX_OFFLOAD_MBUF_FAST_FREE | + info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_RSS_HASH | + RTE_ETH_RX_OFFLOAD_TCP_LRO); + info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | hns3_txvlan_cap_get(hw)); - if (hns3_dev_outer_udp_cksum_supported(hw)) - info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) + info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; - if (hns3_dev_indep_txrx_supported(hw)) + if (hns3_dev_get_support(hw, INDEP_TXRX)) info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; @@ -1022,11 +729,14 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) .offloads = 0, }; - info->vmdq_queue_num = 0; - info->reta_size = hw->rss_ind_tbl_size; info->hash_key_size = HNS3_RSS_KEY_SIZE; info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + + info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; + info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; + info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; + info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; 
info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; @@ -1111,6 +821,8 @@ hns3vf_interrupt_handler(void *param) /* Read out interrupt causes */ event_cause = hns3vf_check_event_cause(hns, &clearval); + /* Clear interrupt causes */ + hns3vf_clear_event_cause(hw, clearval); switch (event_cause) { case HNS3VF_VECTOR0_EVENT_RST: @@ -1123,9 +835,6 @@ hns3vf_interrupt_handler(void *param) break; } - /* Clear interrupt causes */ - hns3vf_clear_event_cause(hw, clearval); - /* Enable interrupt */ hns3vf_enable_irq0(hw); } @@ -1403,28 +1112,48 @@ hns3vf_get_queue_depth(struct hns3_hw *hw) return 0; } +static void +hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps) +{ + if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B)) + hns3_set_bit(hw->capability, + HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1); +} + static int -hns3vf_get_tc_info(struct hns3_hw *hw) +hns3vf_get_num_tc(struct hns3_hw *hw) { - uint8_t resp_msg; - int ret; + uint8_t num_tc = 0; uint32_t i; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0, - true, &resp_msg, sizeof(resp_msg)); - if (ret) { - hns3_err(hw, "VF request to get TC info from PF failed %d", - ret); - return ret; + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + if (hw->hw_tc_map & BIT(i)) + num_tc++; } + return num_tc; +} - hw->hw_tc_map = resp_msg; +static int +hns3vf_get_basic_info(struct hns3_hw *hw) +{ + uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; + struct hns3_basic_info *basic_info; + int ret; - for (i = 0; i < HNS3_MAX_TC_NUM; i++) { - if (hw->hw_tc_map & BIT(i)) - hw->num_tc++; + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, + true, resp_msg, sizeof(resp_msg)); + if (ret) { + hns3_err(hw, "failed to get basic info from PF, ret = %d.", + ret); + return ret; } + basic_info = (struct hns3_basic_info *)resp_msg; + hw->hw_tc_map = basic_info->hw_tc_map; + hw->num_tc = hns3vf_get_num_tc(hw); + hw->pf_vf_if_version = basic_info->pf_vf_if_version; + hns3vf_update_caps(hw, basic_info->caps); + return 0; } @@ -1463,6 +1192,11 @@ hns3vf_get_configuration(struct hns3_hw *hw) hns3vf_get_push_lsc_cap(hw); + /* Get basic info from PF */ + ret = hns3vf_get_basic_info(hw); + if (ret) + return ret; + /* Get queue configuration from PF */ ret = hns3vf_get_queue_info(hw); if (ret) @@ -1478,12 +1212,7 @@ hns3vf_get_configuration(struct hns3_hw *hw) if (ret) return ret; - ret = hns3vf_get_port_base_vlan_filter_state(hw); - if (ret) - return ret; - - /* Get tc configuration from PF */ - return hns3vf_get_tc_info(hw); + return hns3vf_get_port_base_vlan_filter_state(hw); } static int @@ -1492,18 +1221,6 @@ hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q, { struct hns3_hw *hw = &hns->hw; - if (nb_rx_q < hw->num_tc) { - hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).", - nb_rx_q, hw->num_tc); - return -EINVAL; - } - - if (nb_tx_q < hw->num_tc) { - hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).", - nb_tx_q, hw->num_tc); - return -EINVAL; - } - return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q); } @@ -1511,7 +1228,6 @@ static void hns3vf_request_link_info(struct hns3_hw *hw) { struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); - uint8_t resp_msg; bool send_req; int ret; @@ -1524,7 +1240,7 @@ hns3vf_request_link_info(struct hns3_hw *hw) return; ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, - &resp_msg, sizeof(resp_msg)); + NULL, 0); if (ret) { hns3_err(hw, "failed to fetch link status, ret = %d", ret); return; @@ -1606,6 +1322,26 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t 
vlan_id, int on) return ret; } +static int +hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) +{ + uint8_t msg_data; + int ret; + + if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD)) + return 0; + + msg_data = enable ? 1 : 0; + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, + HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, + sizeof(msg_data), true, NULL, 0); + if (ret) + hns3_err(hw, "%s vlan filter failed, ret = %d.", + enable ? "enable" : "disable", ret); + + return ret; +} + static int hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) { @@ -1616,7 +1352,8 @@ hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, &msg_data, sizeof(msg_data), false, NULL, 0); if (ret) - hns3_err(hw, "vf enable strip failed, ret =%d", ret); + hns3_err(hw, "vf %s strip failed, ret = %d.", + enable ? "enable" : "disable", ret); return ret; } @@ -1636,11 +1373,24 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask) } tmp_mask = (unsigned int)mask; + + if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) { + rte_spinlock_lock(&hw->lock); + /* Enable or disable VLAN filter */ + if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) + ret = hns3vf_en_vlan_filter(hw, true); + else + ret = hns3vf_en_vlan_filter(hw, false); + rte_spinlock_unlock(&hw->lock); + if (ret) + return ret; + } + /* Vlan stripping setting */ - if (tmp_mask & ETH_VLAN_STRIP_MASK) { + if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) { rte_spinlock_lock(&hw->lock); /* Enable or disable VLAN stripping */ - if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ret = hns3vf_en_hw_strip_rxvtag(hw, true); else ret = hns3vf_en_hw_strip_rxvtag(hw, false); @@ -1708,7 +1458,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns) int ret; dev_conf = &hw->data->dev_conf; - en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true + en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? 
true : false; ret = hns3vf_en_hw_strip_rxvtag(hw, en); if (ret) @@ -1733,9 +1483,10 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) } /* Apply vlan offload setting */ - ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK | + RTE_ETH_VLAN_FILTER_MASK); if (ret) - hns3_err(hw, "dev config vlan offload failed, ret =%d", ret); + hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret); return ret; } @@ -1756,11 +1507,10 @@ hns3vf_keep_alive_handler(void *param) struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - uint8_t respmsg; int ret; ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, - false, &respmsg, sizeof(uint8_t)); + false, NULL, 0); if (ret) hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", ret); @@ -1887,12 +1637,6 @@ hns3vf_init_hardware(struct hns3_adapter *hns) goto err_init_hardware; } - ret = hns3vf_set_alive(hw, true); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret); - goto err_init_hardware; - } - return 0; err_init_hardware: @@ -1935,6 +1679,8 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } + hns3_tx_push_init(eth_dev); + /* Get VF resource */ ret = hns3_query_vf_resource(hw); if (ret) @@ -1944,7 +1690,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) hns3vf_clear_event_cause(hw, 0); - ret = rte_intr_callback_register(&pci_dev->intr_handle, + ret = rte_intr_callback_register(pci_dev->intr_handle, hns3vf_interrupt_handler, eth_dev); if (ret) { PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); @@ -1952,7 +1698,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) } /* Enable interrupt */ - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_enable(pci_dev->intr_handle); hns3vf_enable_irq0(hw); /* Get configuration from PF */ @@ -1991,6 +1737,12 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) hns3_rss_set_default_args(hw); + ret = hns3vf_set_alive(hw, true); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret); + goto err_set_tc_queue; + } + return 0; err_set_tc_queue: @@ -1998,8 +1750,8 @@ err_set_tc_queue: err_get_config: hns3vf_disable_irq0(hw); - rte_intr_disable(&pci_dev->intr_handle); - hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, + rte_intr_disable(pci_dev->intr_handle); + hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler, eth_dev); err_intr_callback_register: err_cmd_init: @@ -2024,10 +1776,11 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) (void)hns3_config_gro(hw, false); (void)hns3vf_set_alive(hw, false); (void)hns3vf_set_promisc_mode(hw, false, false, false); + hns3_flow_uninit(eth_dev); hns3_tqp_stats_uninit(hw); hns3vf_disable_irq0(hw); - rte_intr_disable(&pci_dev->intr_handle); - hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, + rte_intr_disable(pci_dev->intr_handle); + hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler, eth_dev); hns3_cmd_uninit(hw); hns3_cmd_destroy_queue(hw); @@ -2040,7 +1793,7 @@ hns3vf_do_stop(struct hns3_adapter *hns) struct hns3_hw *hw = &hns->hw; int ret; - hw->mac.link_status = ETH_LINK_DOWN; + hw->mac.link_status = RTE_ETH_LINK_DOWN; /* * The "hns3vf_do_stop" function will also be called by .stop_service to @@ -2054,7 +1807,7 @@ hns3vf_do_stop(struct hns3_adapter *hns) hns3_dev_release_mbufs(hns); if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { - hns3vf_configure_mac_addr(hns, true); 
+ hns3_configure_all_mac_addr(hns, true); ret = hns3_reset_all_tqps(hns); if (ret) { hns3_err(hw, "failed to reset all queues ret = %d", @@ -2070,7 +1823,7 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; uint16_t q_id; @@ -2088,16 +1841,16 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev) (void)hns3vf_bind_ring_with_vector(hw, vec, false, HNS3_RING_TYPE_RX, q_id); - if (vec < base + intr_handle->nb_efd - 1) + if (vec < base + rte_intr_nb_efd_get(intr_handle) + - 1) vec++; } } /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec) { - rte_free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } + + /* Cleanup vector list */ + rte_intr_vec_list_free(intr_handle); } static int @@ -2115,7 +1868,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) /* Disable datapath on secondary process. */ hns3_mp_req_stop_rxtx(dev); /* Prevent crashes when queues are still in use. */ - rte_delay_ms(hw->tqps_num); + rte_delay_ms(hw->cfg_max_queues); rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { @@ -2139,11 +1892,8 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) struct hns3_hw *hw = &hns->hw; int ret = 0; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - rte_free(eth_dev->process_private); - eth_dev->process_private = NULL; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - } if (hw->adapter_state == HNS3_NIC_STARTED) ret = hns3vf_dev_stop(eth_dev); @@ -2152,13 +1902,11 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) hns3_reset_abort(hns); hw->adapter_state = HNS3_NIC_CLOSED; rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev); - hns3vf_configure_all_mc_mac_addr(hns, true); + hns3_configure_all_mc_mac_addr(hns, true); hns3vf_remove_all_vlan_table(hns); hns3vf_uninit_vf(eth_dev); hns3_free_all_queues(eth_dev); rte_free(hw->reset.wait_data); - rte_free(eth_dev->process_private); - eth_dev->process_private = NULL; hns3_mp_uninit_primary(); hns3_warn(hw, "Close port %u finished", hw->data->port_id); @@ -2183,8 +1931,11 @@ hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, HNS3_FW_VERSION_BYTE1_S), hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, HNS3_FW_VERSION_BYTE0_S)); + if (ret < 0) + return -EINVAL; + ret += 1; /* add the size of '\0' */ - if (fw_size < (uint32_t)ret) + if (fw_size < (size_t)ret) return ret; else return 0; @@ -2201,29 +1952,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, memset(&new_link, 0, sizeof(new_link)); switch (mac->link_speed) { - case ETH_SPEED_NUM_10M: - case ETH_SPEED_NUM_100M: - case ETH_SPEED_NUM_1G: - case ETH_SPEED_NUM_10G: - case ETH_SPEED_NUM_25G: - case ETH_SPEED_NUM_40G: - case ETH_SPEED_NUM_50G: - case ETH_SPEED_NUM_100G: - case ETH_SPEED_NUM_200G: - new_link.link_speed = mac->link_speed; + case RTE_ETH_SPEED_NUM_10M: + case RTE_ETH_SPEED_NUM_100M: + case RTE_ETH_SPEED_NUM_1G: + case RTE_ETH_SPEED_NUM_10G: + case RTE_ETH_SPEED_NUM_25G: + case RTE_ETH_SPEED_NUM_40G: + case RTE_ETH_SPEED_NUM_50G: + case RTE_ETH_SPEED_NUM_100G: + case RTE_ETH_SPEED_NUM_200G: + if (mac->link_status) + new_link.link_speed = mac->link_speed; break; default: if (mac->link_status) - 
new_link.link_speed = ETH_SPEED_NUM_UNKNOWN; - else - new_link.link_speed = ETH_SPEED_NUM_NONE; + new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; break; } + if (!mac->link_status) + new_link.link_speed = RTE_ETH_SPEED_NUM_NONE; + new_link.link_duplex = mac->link_duplex; - new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; + new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; new_link.link_autoneg = - !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); + !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED); return rte_eth_linkstatus_set(eth_dev, &new_link); } @@ -2253,7 +2006,7 @@ static int hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; @@ -2276,16 +2029,13 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) if (rte_intr_efd_enable(intr_handle, intr_vector)) return -EINVAL; - if (intr_handle->intr_vec == NULL) { - intr_handle->intr_vec = - rte_zmalloc("intr_vec", - hw->used_rx_queues * sizeof(int), 0); - if (intr_handle->intr_vec == NULL) { - hns3_err(hw, "Failed to allocate %u rx_queues" - " intr_vec", hw->used_rx_queues); - ret = -ENOMEM; - goto vf_alloc_intr_vec_error; - } + /* Allocate vector list */ + if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", + hw->used_rx_queues)) { + hns3_err(hw, "Failed to allocate %u rx_queues" + " intr_vec", hw->used_rx_queues); + ret = -ENOMEM; + goto vf_alloc_intr_vec_error; } if (rte_intr_allow_others(intr_handle)) { @@ -2298,20 +2048,22 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) HNS3_RING_TYPE_RX, q_id); if (ret) goto vf_bind_vector_error; - intr_handle->intr_vec[q_id] = vec; + + if (rte_intr_vec_list_index_set(intr_handle, q_id, vec)) + goto vf_bind_vector_error; + /* * If there are not enough efds (e.g. not enough interrupt), * remaining queues will be bond to the last interrupt. 
*/ - if (vec < base + intr_handle->nb_efd - 1) + if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) vec++; } rte_intr_enable(intr_handle); return 0; vf_bind_vector_error: - free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; + rte_intr_vec_list_free(intr_handle); vf_alloc_intr_vec_error: rte_intr_efd_disable(intr_handle); return ret; @@ -2322,7 +2074,7 @@ hns3vf_restore_rx_interrupt(struct hns3_hw *hw) { struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; uint16_t q_id; int ret; @@ -2332,8 +2084,9 @@ hns3vf_restore_rx_interrupt(struct hns3_hw *hw) if (rte_intr_dp_is_en(intr_handle)) { for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { ret = hns3vf_bind_ring_with_vector(hw, - intr_handle->intr_vec[q_id], true, - HNS3_RING_TYPE_RX, q_id); + rte_intr_vec_list_index_get(intr_handle, + q_id), + true, HNS3_RING_TYPE_RX, q_id); if (ret) return ret; } @@ -2465,7 +2218,8 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) /* Check the registers to confirm whether there is reset pending */ hns3vf_check_event_cause(hns, NULL); reset = hns3vf_get_reset_level(hw, &hw->reset.pending); - if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { + if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && + hw->reset.level < reset) { hns3_warn(hw, "High level reset %d is pending", reset); return true; } @@ -2503,7 +2257,7 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete"); return -EAGAIN; } else if (wait_data->result == HNS3_WAIT_TIMEOUT) { - gettimeofday(&tv, NULL); + hns3_clock_gettime(&tv); hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", tv.tv_sec, tv.tv_usec); return -ETIME; @@ -2513,7 +2267,7 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) wait_data->hns = hns; wait_data->check_completion = is_vf_reset_done; wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT * - HNS3VF_RESET_WAIT_MS + get_timeofday_ms(); + HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms(); wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC; wait_data->count = HNS3VF_RESET_WAIT_CNT; wait_data->result = HNS3_WAIT_REQUEST; @@ -2550,17 +2304,17 @@ hns3vf_stop_service(struct hns3_adapter *hns) * Make sure call update link status before hns3vf_stop_poll_job * because update link status depend on polling job exist. */ - hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed, + hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed, hw->mac.link_duplex); hns3vf_stop_poll_job(eth_dev); } - hw->mac.link_status = ETH_LINK_DOWN; + hw->mac.link_status = RTE_ETH_LINK_DOWN; hns3_set_rxtx_function(eth_dev); rte_wmb(); /* Disable datapath on secondary process. */ hns3_mp_req_stop_rxtx(eth_dev); - rte_delay_ms(hw->tqps_num); + rte_delay_ms(hw->cfg_max_queues); rte_spinlock_lock(&hw->lock); if (hw->adapter_state == HNS3_NIC_STARTED || @@ -2577,7 +2331,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) * required to delete the entries. 
*/ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) - hns3vf_configure_all_mc_mac_addr(hns, true); + hns3_configure_all_mc_mac_addr(hns, true); rte_spinlock_unlock(&hw->lock); return 0; @@ -2663,11 +2417,11 @@ hns3vf_restore_conf(struct hns3_adapter *hns) if (ret) return ret; - ret = hns3vf_configure_mac_addr(hns, false); + ret = hns3_configure_all_mac_addr(hns, false); if (ret) return ret; - ret = hns3vf_configure_all_mc_mac_addr(hns, false); + ret = hns3_configure_all_mc_mac_addr(hns, false); if (ret) goto err_mc_mac; @@ -2698,12 +2452,19 @@ hns3vf_restore_conf(struct hns3_adapter *hns) hns3_info(hw, "hns3vf dev restart successful!"); } else if (hw->adapter_state == HNS3_NIC_STOPPING) hw->adapter_state = HNS3_NIC_CONFIGURED; + + ret = hns3vf_set_alive(hw, true); + if (ret) { + hns3_err(hw, "failed to VF send alive to PF: %d", ret); + goto err_vlan_table; + } + return 0; err_vlan_table: - hns3vf_configure_all_mc_mac_addr(hns, true); + hns3_configure_all_mc_mac_addr(hns, true); err_mc_mac: - hns3vf_configure_mac_addr(hns, true); + hns3_configure_all_mac_addr(hns, true); return ret; } @@ -2768,14 +2529,13 @@ hns3vf_reset_service(void *param) */ reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); if (reset_level != HNS3_NONE_RESET) { - gettimeofday(&tv_start, NULL); + hns3_clock_gettime(&tv_start); hns3_reset_process(hns, reset_level); - gettimeofday(&tv, NULL); + hns3_clock_gettime(&tv); timersub(&tv, &tv_start, &tv_delta); - msec = tv_delta.tv_sec * MSEC_PER_SEC + - tv_delta.tv_usec / USEC_PER_MSEC; + msec = hns3_clock_calctime_ms(&tv_delta); if (msec > HNS3_RESET_PROCESS_MS) - hns3_err(hw, "%d handle long time delta %" PRIx64 + hns3_err(hw, "%d handle long time delta %" PRIu64 " ms time=%ld.%.6ld", hw->reset.level, msec, tv.tv_sec, tv.tv_usec); } @@ -2790,7 +2550,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) int ret; if (hw->reset.level == HNS3_VF_FULL_RESET) { - rte_intr_disable(&pci_dev->intr_handle); + rte_intr_disable(pci_dev->intr_handle); ret = hns3vf_set_bus_master(pci_dev, true); if (ret < 0) { hns3_err(hw, "failed to set pci bus, ret = %d", ret); @@ -2816,7 +2576,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) hns3_err(hw, "Failed to enable msix"); } - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_enable(pci_dev->intr_handle); } ret = hns3_reset_all_tqps(hns); @@ -2867,10 +2627,10 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = { .txq_info_get = hns3_txq_info_get, .rx_burst_mode_get = hns3_rx_burst_mode_get, .tx_burst_mode_get = hns3_tx_burst_mode_get, - .mac_addr_add = hns3vf_add_mac_addr, - .mac_addr_remove = hns3vf_remove_mac_addr, + .mac_addr_add = hns3_add_mac_addr, + .mac_addr_remove = hns3_remove_mac_addr, .mac_addr_set = hns3vf_set_default_mac_addr, - .set_mc_addr_list = hns3vf_set_mc_mac_addr_list, + .set_mc_addr_list = hns3_set_mc_mac_addr_list, .link_update = hns3vf_dev_link_update, .rss_hash_update = hns3_dev_rss_hash_update, .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, @@ -2894,6 +2654,15 @@ static const struct hns3_reset_ops hns3vf_reset_ops = { .start_service = hns3vf_start_service, }; +static void +hns3vf_init_hw_ops(struct hns3_hw *hw) +{ + hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr; + hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr; + hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr; + hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr; +} + static int hns3vf_dev_init(struct rte_eth_dev *eth_dev) { @@ -2903,17 +2672,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); - 
eth_dev->process_private = (struct hns3_process_private *) - rte_zmalloc_socket("hns3_filter_list", - sizeof(struct hns3_process_private), - RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); - if (eth_dev->process_private == NULL) { - PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); - return -ENOMEM; - } - - /* initialize flow filter lists */ - hns3_filterlist_init(eth_dev); + hns3_flow_init(eth_dev); hns3_set_rxtx_function(eth_dev); eth_dev->dev_ops = &hns3vf_eth_dev_ops; @@ -2925,8 +2684,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) "process, ret = %d", ret); goto err_mp_init_secondary; } - hw->secondary_cnt++; + hns3_tx_push_init(eth_dev); return 0; } @@ -2948,6 +2707,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) goto err_init_reset; hw->reset.ops = &hns3vf_reset_ops; + hns3vf_init_hw_ops(hw); ret = hns3vf_init_vf(eth_dev); if (ret) { PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret); @@ -3014,8 +2774,6 @@ err_mp_init_secondary: eth_dev->tx_pkt_burst = NULL; eth_dev->tx_pkt_prepare = NULL; eth_dev->tx_descriptor_status = NULL; - rte_free(eth_dev->process_private); - eth_dev->process_private = NULL; return ret; } @@ -3028,11 +2786,8 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - rte_free(eth_dev->process_private); - eth_dev->process_private = NULL; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - } if (hw->adapter_state < HNS3_NIC_CLOSING) hns3vf_dev_close(eth_dev); @@ -3074,4 +2829,6 @@ RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map); RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci"); RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf, HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " - HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "); + HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> " + HNS3_DEVARG_MBX_TIME_LIMIT_MS "= ");
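
Note on usage (not part of the patch): the hunks above move the VF driver to the RTE_ETH_*-prefixed ethdev constants, derive the frame size from rxmode.mtu instead of max_rx_pkt_len plus DEV_RX_OFFLOAD_JUMBO_FRAME, and switch MAC address handling to the common hns3_* helpers and the rte_eth_dev_set_mc_addr_list() path. The following minimal application-side sketch shows how a port might be configured against the renamed API; the port id, queue counts, offload selection and multicast address are illustrative assumptions, not values taken from this patch.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

/* Hypothetical helper: configure one hns3 VF port with the post-rename
 * ethdev flags and program its multicast filter list. */
static int
example_setup_hns3_vf(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
			.mtu = RTE_ETHER_MTU,	/* frame size is now derived from the MTU */
			.offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH |
				    RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
		},
	};
	/* Illustrative multicast address only. */
	struct rte_ether_addr mc_list[] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
	};
	int ret;

	/* Single Rx/Tx queue just to keep the sketch short. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Use the list API for multicast addresses, as the driver comment
	 * removed above recommends, rather than rte_eth_dev_mac_addr_add(). */
	return rte_eth_dev_set_mc_addr_list(port_id, mc_list, RTE_DIM(mc_list));
}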