X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev_vf.c;h=fd20c522dc3315bd446c8f73f3ba786b77c5d1d7;hb=2b6d6d71a0992220043b2f5c3b885c486e7921b7;hp=2b1de8d325de407f59062f559b3ef6d92f128e61;hpb=62024eb8275696bead35b38a6062a2513f1f7c58;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 2b1de8d325..fd20c522dc 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2,29 +2,10 @@
  * Copyright(c) 2018-2019 Hisilicon Limited.
  */
 
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
-
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
 #include
-#include
 
 #include
 #include
@@ -167,8 +148,12 @@ hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
 			control |= PCI_MSIX_FLAGS_ENABLE;
 		else
 			control &= ~PCI_MSIX_FLAGS_ENABLE;
-		rte_pci_write_config(device, &control, sizeof(control),
-				     (pos + PCI_MSIX_FLAGS));
+		ret = rte_pci_write_config(device, &control, sizeof(control),
+					   (pos + PCI_MSIX_FLAGS));
+		if (ret < 0) {
+			PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
+				     (pos + PCI_MSIX_FLAGS));
+		}
 		return 0;
 	}
 	return -ENXIO;
@@ -185,7 +170,7 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 				HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
 				RTE_ETHER_ADDR_LEN, false, NULL, 0);
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
 			 mac_str, ret);
@@ -205,7 +190,7 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
 				false, NULL, 0);
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
 			 mac_str, ret);
@@ -225,7 +210,7 @@ hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 		addr = &hw->mc_addrs[i];
 		/* Check if there are duplicate addresses */
 		if (rte_is_same_ether_addr(addr, mac_addr)) {
-			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      addr);
 			hns3_err(hw, "failed to add mc mac addr, same addrs"
 				 "(%s) is added by the set_mc_mac_addr_list "
@@ -236,7 +221,7 @@ hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
 
 	ret = hns3vf_add_mc_mac_addr(hw, mac_addr);
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
 			 mac_str, ret);
@@ -271,7 +256,7 @@ hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 	rte_spinlock_unlock(&hw->lock);
 
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "failed to add mac addr(%s), ret = %d",
 			 mac_str, ret);
@@ -298,7 +283,7 @@ hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
 	rte_spinlock_unlock(&hw->lock);
 
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "failed to remove mac addr(%s), ret = %d",
 			 mac_str, ret);
@@ -339,12 +324,12 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
 		 * -EPREM to VF driver through mailbox.
 		 */
 		if (ret == -EPERM) {
-			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      old_addr);
 			hns3_warn(hw, "Has permanet mac addr(%s) for vf",
 				  mac_str);
 		} else {
-			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      mac_addr);
 			hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
 				  mac_str, ret);
@@ -381,7 +366,7 @@ hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
 
 		if (ret) {
 			err = ret;
-			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      addr);
 			hns3_err(hw, "failed to %s mac addr(%s) index:%d "
 				 "ret = %d.", del ? "remove" : "restore",
@@ -403,7 +388,7 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
 				false, NULL, 0);
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
 			 mac_str, ret);
@@ -424,7 +409,7 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
 				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
 				false, NULL, 0);
 	if (ret) {
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      mac_addr);
 		hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
 			 mac_str, ret);
@@ -444,7 +429,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
 	uint32_t j;
 
 	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
-		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
+		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
 		return -EINVAL;
@@ -454,7 +439,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
 	for (i = 0; i < nb_mc_addr; i++) {
 		addr = &mc_addr_set[i];
 		if (!rte_is_multicast_ether_addr(addr)) {
-			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      addr);
 			hns3_err(hw,
 				 "failed to set mc mac addr, addr(%s) invalid.",
@@ -465,7 +450,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
 		/* Check if there are duplicate addresses */
 		for (j = i + 1; j < nb_mc_addr; j++) {
 			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
-				rte_ether_format_addr(mac_str,
+				hns3_ether_format_addr(mac_str,
 						      RTE_ETHER_ADDR_FMT_SIZE,
 						      addr);
 				hns3_err(hw, "failed to set mc mac addr, "
@@ -482,7 +467,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
 		for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) {
 			if (rte_is_same_ether_addr(addr,
 						   &hw->data->mac_addrs[j])) {
-				rte_ether_format_addr(mac_str,
+				hns3_ether_format_addr(mac_str,
 						      RTE_ETHER_ADDR_FMT_SIZE,
 						      addr);
 				hns3_err(hw, "failed to set mc mac addr, "
@@ -565,7 +550,7 @@ hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
 			ret = hns3vf_add_mc_mac_addr(hw, addr);
 		if (ret) {
 			err = ret;
-			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 					      addr);
 			hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
 				 del ? "Remove" : "Restore", mac_str, ret);
@@ -610,6 +595,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
 	req->msg[1] = en_bc_pmc ? 1 : 0;
 	req->msg[2] = en_uc_pmc ? 1 : 0;
 	req->msg[3] = en_mc_pmc ? 1 : 0;
+	req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
 
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret)
@@ -720,7 +706,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
 	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
 				sizeof(bind_msg), false, NULL, 0);
 	if (ret)
-		hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
 			 op_str, queue_id, bind_msg.vector_id, ret);
 
 	return ret;
@@ -756,13 +742,17 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+		/*
+		 * QL(quantity limiter) is not used currently, just set 0 to
+		 * close it.
+		 */
 		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
 
 		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
 						   HNS3_RING_TYPE_TX, i);
 		if (ret) {
 			PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
-					  "vector: %d, ret=%d", i, vec, ret);
+					  "vector: %u, ret=%d", i, vec, ret);
 			return ret;
 		}
 
@@ -770,7 +760,7 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
 						   HNS3_RING_TYPE_RX, i);
 		if (ret) {
 			PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
-					  "vector: %d, ret=%d", i, vec, ret);
+					  "vector: %u, ret=%d", i, vec, ret);
 			return ret;
 		}
 	}
@@ -783,12 +773,12 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 	struct rte_eth_rss_conf rss_conf;
+	uint32_t max_rx_pkt_len;
 	uint16_t mtu;
 	bool gro_en;
 	int ret;
@@ -826,11 +816,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 		conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 		hw->rss_dis_flag = false;
 		rss_conf = conf->rx_adv_conf.rss_conf;
-		if (rss_conf.rss_key == NULL) {
-			rss_conf.rss_key = rss_cfg->key;
-			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
-		}
-
 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
 		if (ret)
 			goto cfg_err;
@@ -841,12 +826,18 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
 	 * according to the maximum RX packet length.
 	 */
 	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		/*
-		 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
-		 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
-		 * can safely assign to "uint16_t" type variable.
-		 */
-		mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+		max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
+		if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
+		    max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
+			hns3_err(hw, "maximum Rx packet length must be greater "
+				 "than %u and less than %u when jumbo frame enabled.",
+				 (uint16_t)HNS3_DEFAULT_FRAME_LEN,
+				 (uint16_t)HNS3_MAX_FRAME_LEN);
+			ret = -EINVAL;
+			goto cfg_err;
+		}
+
+		mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
 		ret = hns3vf_dev_mtu_set(dev, mtu);
 		if (ret)
 			goto cfg_err;
@@ -908,7 +899,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	 * MTU value issued by hns3 VF PMD driver must be less than or equal to
 	 * PF's MTU.
 	 */
-	if (rte_atomic16_read(&hw->reset.resetting)) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
 	}
@@ -938,7 +929,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		rte_spinlock_unlock(&hw->lock);
 		return ret;
 	}
-	if (frame_size > RTE_ETHER_MAX_LEN)
+	if (mtu > RTE_ETHER_MTU)
 		dev->data->dev_conf.rxmode.offloads |=
 						DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -1032,7 +1023,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->vmdq_queue_num = 0;
 
-	info->reta_size = HNS3_RSS_IND_TBL_SIZE;
+	info->reta_size = hw->rss_ind_tbl_size;
 	info->hash_key_size = HNS3_RSS_KEY_SIZE;
 	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
 	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
@@ -1075,7 +1066,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
-		rte_atomic16_set(&hw->reset.disable_cmd, 1);
+		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -1148,6 +1139,7 @@ hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
 	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
 	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
 	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
+	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
 }
 
 static void
@@ -1160,6 +1152,21 @@ hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
 	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
 	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
 	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
+	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
+}
+
+static int
+hns3vf_check_dev_specifications(struct hns3_hw *hw)
+{
+	if (hw->rss_ind_tbl_size == 0 ||
+	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
+		hns3_warn(hw, "the size of hash lookup table configured (%u)"
+			  " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
+			  HNS3_RSS_IND_TBL_SIZE_MAX);
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static int
@@ -1182,7 +1189,7 @@ hns3vf_query_dev_specifications(struct hns3_hw *hw)
 
 	hns3vf_parse_dev_specifications(hw, desc);
 
-	return 0;
+	return hns3vf_check_dev_specifications(hw);
 }
 
 static int
@@ -1209,10 +1216,11 @@ hns3vf_get_capability(struct hns3_hw *hw)
 	if (revision < PCI_REVISION_ID_HIP09_A) {
 		hns3vf_set_default_dev_specifications(hw);
 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
-		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
 		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
+		hw->rss_info.ipv6_sctp_offload_supported = false;
+		hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
 		return 0;
 	}
 
@@ -1225,10 +1233,11 @@ hns3vf_get_capability(struct hns3_hw *hw)
 	}
 
 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
-	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
 	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
 	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
+	hw->rss_info.ipv6_sctp_offload_supported = true;
+	hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
 	return 0;
 }
 
@@ -1341,7 +1350,7 @@ hns3vf_get_tc_info(struct hns3_hw *hw)
 {
 	uint8_t resp_msg;
 	int ret;
-	int i;
+	uint32_t i;
 
 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0, true,
 				&resp_msg, sizeof(resp_msg));
@@ -1424,13 +1433,13 @@ hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
 	struct hns3_hw *hw = &hns->hw;
 
 	if (nb_rx_q < hw->num_tc) {
-		hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+		hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
 			 nb_rx_q, hw->num_tc);
 		return -EINVAL;
 	}
 
 	if (nb_tx_q < hw->num_tc) {
-		hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+		hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
 			 nb_tx_q, hw->num_tc);
 		return -EINVAL;
 	}
@@ -1444,7 +1453,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
 	uint8_t resp_msg;
 	int ret;
 
-	if (rte_atomic16_read(&hw->reset.resetting))
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
 		return;
 	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
 				&resp_msg, sizeof(resp_msg));
@@ -1452,6 +1461,41 @@ hns3vf_request_link_info(struct hns3_hw *hw)
 		hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
 }
 
+void
+hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
+			  uint32_t link_speed, uint8_t link_duplex)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+	struct hns3_mac *mac = &hw->mac;
+	bool report_lse;
+	bool changed;
+
+	changed = mac->link_status != link_status ||
+		  mac->link_speed != link_speed ||
+		  mac->link_duplex != link_duplex;
+	if (!changed)
+		return;
+
+	/*
+	 * VF's link status/speed/duplex were updated by polling from PF driver,
+	 * because the link status/speed/duplex may be changed in the polling
+	 * interval, so driver will report lse (lsc event) once any of the above
+	 * thress variables changed.
+	 * But if the PF's link status is down and driver saved link status is
+	 * also down, there are no need to report lse.
+	 */
+	report_lse = true;
+	if (link_status == ETH_LINK_DOWN && link_status == mac->link_status)
+		report_lse = false;
+
+	mac->link_status = link_status;
+	mac->link_speed = link_speed;
+	mac->link_duplex = link_duplex;
+
+	if (report_lse)
+		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
 static int
 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
 {
@@ -1477,7 +1521,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (rte_atomic16_read(&hw->reset.resetting)) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		hns3_err(hw,
 			 "vf set vlan id failed during resetting, vlan_id =%u",
 			 vlan_id);
@@ -1516,7 +1560,7 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	unsigned int tmp_mask;
 	int ret = 0;
 
-	if (rte_atomic16_read(&hw->reset.resetting)) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
 		hns3_err(hw, "vf set vlan offload failed during resetting, "
 			 "mask = 0x%x", mask);
 		return -EIO;
@@ -1755,7 +1799,6 @@ hns3vf_init_hardware(struct hns3_adapter *hns)
 		goto err_init_hardware;
 	}
 
-	hns3vf_request_link_info(hw);
 	return 0;
 
 err_init_hardware:
@@ -1845,7 +1888,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 	if (ret)
 		goto err_set_tc_queue;
 
-	hns3_set_default_rss_args(hw);
+	hns3_rss_set_default_args(hw);
 
 	return 0;
 
@@ -1898,7 +1941,18 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 
 	hw->mac.link_status = ETH_LINK_DOWN;
 
-	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
+	/*
+	 * The "hns3vf_do_stop" function will also be called by .stop_service to
+	 * prepare reset. At the time of global or IMP reset, the command cannot
+	 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
+	 * accessed during the reset process. So the mbuf can not be released
+	 * during reset and is required to be released after the reset is
+	 * completed.
+	 */
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+		hns3_dev_release_mbufs(hns);
+
+	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
 		hns3vf_configure_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -1963,11 +2017,10 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	rte_delay_ms(hw->tqps_num);
 
 	rte_spinlock_lock(&hw->lock);
-	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
 		hns3_stop_tqps(hw);
 		hns3vf_do_stop(hns);
 		hns3vf_unmap_rx_interrupt(dev);
-		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
 	hns3_rx_scattered_reset(dev);
@@ -1984,8 +2037,11 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
 	struct hns3_hw *hw = &hns->hw;
 	int ret = 0;
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		rte_free(eth_dev->process_private);
+		eth_dev->process_private = NULL;
 		return 0;
+	}
 
 	if (hw->adapter_state == HNS3_NIC_STARTED)
 		ret = hns3vf_dev_stop(eth_dev);
@@ -2002,7 +2058,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
 	rte_free(eth_dev->process_private);
 	eth_dev->process_private = NULL;
 	hns3_mp_uninit_primary();
-	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+	hns3_warn(hw, "Close port %u finished", hw->data->port_id);
 
 	return ret;
 }
@@ -2079,6 +2135,8 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 	if (ret)
 		return ret;
 
+	hns3_enable_rxd_adv_layout(hw);
+
 	ret = hns3_init_queues(hns, reset_queue);
 	if (ret)
 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
@@ -2098,26 +2156,27 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 	uint16_t q_id;
 	int ret;
 
-	if (dev->data->dev_conf.intr_conf.rxq == 0)
+	/*
+	 * hns3 needs a separate interrupt to be used as event interrupt which
+	 * could not be shared with task queue pair, so KERNEL drivers need
+	 * support multiple interrupt vectors.
+	 */
+	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
+	    !rte_intr_cap_multiple(intr_handle))
 		return 0;
 
-	/* disable uio/vfio intr/eventfd mapping */
 	rte_intr_disable(intr_handle);
+	intr_vector = hw->used_rx_queues;
+	/* It creates event fd for each intr vector when MSIX is used */
+	if (rte_intr_efd_enable(intr_handle, intr_vector))
+		return -EINVAL;
 
-	/* check and configure queue intr-vector mapping */
-	if (rte_intr_cap_multiple(intr_handle) ||
-	    !RTE_ETH_DEV_SRIOV(dev).active) {
-		intr_vector = hw->used_rx_queues;
-		/* It creates event fd for each intr vector when MSIX is used */
-		if (rte_intr_efd_enable(intr_handle, intr_vector))
-			return -EINVAL;
-	}
-	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+	if (intr_handle->intr_vec == NULL) {
 		intr_handle->intr_vec =
 			rte_zmalloc("intr_vec",
 				    hw->used_rx_queues * sizeof(int), 0);
 		if (intr_handle->intr_vec == NULL) {
-			hns3_err(hw, "Failed to allocate %d rx_queues"
+			hns3_err(hw, "Failed to allocate %u rx_queues"
				     " intr_vec", hw->used_rx_queues);
 			ret = -ENOMEM;
 			goto vf_alloc_intr_vec_error;
@@ -2128,28 +2187,26 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 		vec = RTE_INTR_VEC_RXTX_OFFSET;
 		base = RTE_INTR_VEC_RXTX_OFFSET;
 	}
-	if (rte_intr_dp_is_en(intr_handle)) {
-		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
-			ret = hns3vf_bind_ring_with_vector(hw, vec, true,
-							   HNS3_RING_TYPE_RX,
-							   q_id);
-			if (ret)
-				goto vf_bind_vector_error;
-			intr_handle->intr_vec[q_id] = vec;
-			if (vec < base + intr_handle->nb_efd - 1)
-				vec++;
-		}
+
+	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
+		ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+						   HNS3_RING_TYPE_RX, q_id);
+		if (ret)
+			goto vf_bind_vector_error;
+		intr_handle->intr_vec[q_id] = vec;
+		/*
+		 * If there are not enough efds (e.g. not enough interrupt),
+		 * remaining queues will be bond to the last interrupt.
+		 */
+		if (vec < base + intr_handle->nb_efd - 1)
+			vec++;
 	}
 	rte_intr_enable(intr_handle);
 	return 0;
 
 vf_bind_vector_error:
-	rte_intr_efd_disable(intr_handle);
-	if (intr_handle->intr_vec) {
-		free(intr_handle->intr_vec);
-		intr_handle->intr_vec = NULL;
-	}
-	return ret;
+	free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
 vf_alloc_intr_vec_error:
 	rte_intr_efd_disable(intr_handle);
 	return ret;
@@ -2194,7 +2251,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (rte_atomic16_read(&hw->reset.resetting))
+	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -2206,11 +2263,8 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 	ret = hns3vf_map_rx_interrupt(dev);
-	if (ret) {
-		hw->adapter_state = HNS3_NIC_CONFIGURED;
-		rte_spinlock_unlock(&hw->lock);
-		return ret;
-	}
+	if (ret)
+		goto map_rx_inter_err;
 
 	/*
 	 * There are three register used to control the status of a TQP
@@ -2224,19 +2278,12 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	 * status of queue in the dpdk framework.
 	 */
 	ret = hns3_start_all_txqs(dev);
-	if (ret) {
-		hw->adapter_state = HNS3_NIC_CONFIGURED;
-		rte_spinlock_unlock(&hw->lock);
-		return ret;
-	}
+	if (ret)
+		goto map_rx_inter_err;
 
 	ret = hns3_start_all_rxqs(dev);
-	if (ret) {
-		hns3_stop_all_txqs(dev);
-		hw->adapter_state = HNS3_NIC_CONFIGURED;
-		rte_spinlock_unlock(&hw->lock);
-		return ret;
-	}
+	if (ret)
+		goto start_all_rxqs_fail;
 
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
@@ -2244,7 +2291,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	hns3_rx_scattered_calc(dev);
 	hns3_set_rxtx_function(dev);
 	hns3_mp_req_start_rxtx(dev);
-	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+	hns3vf_service_handler(dev);
 
 	hns3vf_restore_filter(dev);
 
@@ -2257,6 +2304,15 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	 */
 	hns3_start_tqps(hw);
 
+	return ret;
+
+start_all_rxqs_fail:
+	hns3_stop_all_txqs(dev);
+map_rx_inter_err:
+	(void)hns3vf_do_stop(hns);
+	hw->adapter_state = HNS3_NIC_CONFIGURED;
+	rte_spinlock_unlock(&hw->lock);
+
 	return ret;
 }
 
@@ -2366,15 +2422,17 @@ static int
 hns3vf_prepare_reset(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
-	int ret = 0;
+	int ret;
 
 	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
 		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
 					0, true, NULL, 0);
+		if (ret)
+			return ret;
 	}
-	rte_atomic16_set(&hw->reset.disable_cmd, 1);
+	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -2384,8 +2442,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	if (hw->adapter_state == HNS3_NIC_STARTED)
+	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+		hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+					  hw->mac.link_duplex);
+	}
 	hw->mac.link_status = ETH_LINK_DOWN;
 
 	hns3_set_rxtx_function(eth_dev);
@@ -2408,7 +2469,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries.
 	 */
-	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
+	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
 		hns3vf_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -2429,6 +2490,11 @@ hns3vf_start_service(struct hns3_adapter *hns)
 
 	/* Enable interrupt of all rx queues before enabling queues */
 	hns3_dev_all_rx_queue_intr_enable(hw, true);
+	/*
+	 * Enable state of each rxq and txq will be recovered after
+	 * reset, so we need to restore them before enable all tqps;
+	 */
+	hns3_restore_tqp_enable_state(hw);
 	/*
 	 * When finished the initialization, enable queues to receive
 	 * and transmit packets.
@@ -2469,7 +2535,7 @@ hns3vf_check_default_mac_change(struct hns3_hw *hw)
 	ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
 	if (!ret) {
 		rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
-		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
 				      &hw->data->mac_addrs[0]);
 		hns3_warn(hw, "Default MAC address has been changed to:"
 			  " %s by the host PF kernel ethdev driver",
@@ -2575,8 +2641,10 @@ hns3vf_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
-		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
+	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	    SCHEDULE_DEFERRED) {
+		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				 __ATOMIC_RELAXED);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2585,7 +2653,7 @@ hns3vf_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
 		}
 	}
-	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
+	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
 
 	/*
 	 * Hardware reset has been notified, we now have to poll & check if
@@ -2617,7 +2685,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns)
 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
 		rte_intr_disable(&pci_dev->intr_handle);
 		ret = hns3vf_set_bus_master(pci_dev, true);
-		if (ret) {
+		if (ret < 0) {
 			hns3_err(hw, "failed to set pci bus, ret = %d", ret);
 			return ret;
 		}
@@ -2706,6 +2774,7 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.vlan_offload_set = hns3vf_vlan_offload_set,
 	.get_reg = hns3_get_regs,
 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
+	.tx_done_cleanup = hns3_tx_done_cleanup,
 };
 
 static const struct hns3_reset_ops hns3vf_reset_ops = {
@@ -2741,6 +2810,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 
 	hns3_set_rxtx_function(eth_dev);
 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
+	eth_dev->rx_queue_count = hns3_rx_queue_count;
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		ret = hns3_mp_init_secondary();
 		if (ret) {
@@ -2807,7 +2877,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
+	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	    SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
@@ -2847,8 +2918,11 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		rte_free(eth_dev->process_private);
+		eth_dev->process_private = NULL;
+		return 0;
+	}
 
 	if (hw->adapter_state < HNS3_NIC_CLOSING)
 		hns3vf_dev_close(eth_dev);
@@ -2875,7 +2949,7 @@ eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
 static const struct rte_pci_id pci_id_hns3vf_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
-	{ .vendor_id = 0, /* sentinel */ },
+	{ .vendor_id = 0, }, /* sentinel */
 };
 
 static struct rte_pci_driver rte_hns3vf_pmd = {