X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_ethdev_vf.c;h=8d9b7979c806d1687b61d5ba496ef31d418b4dad;hb=70d2f42110576c670a67abd35a4bb6a2d0b00980;hp=6551940629302a401127759da9cacfeb72a1e4fc;hpb=091a0f95b5cd7f1159a6b23ac8fda97b53088b06;p=dpdk.git diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 6551940629..8d9b7979c8 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1,30 +1,11 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2018-2019 Hisilicon Limited. + * Copyright(c) 2018-2021 HiSilicon Limited. */ -#include -#include -#include -#include -#include -#include -#include #include - #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include #include #include @@ -63,13 +44,22 @@ static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr); static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr); +static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete); + /* set PCI bus mastering */ -static void +static int hns3vf_set_bus_master(const struct rte_pci_device *device, bool op) { uint16_t reg; + int ret; - rte_pci_read_config(device, ®, sizeof(reg), PCI_COMMAND); + ret = rte_pci_read_config(device, ®, sizeof(reg), PCI_COMMAND); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_COMMAND); + return ret; + } if (op) /* set the master bit */ @@ -77,7 +67,7 @@ hns3vf_set_bus_master(const struct rte_pci_device *device, bool op) else reg &= ~(PCI_COMMAND_MASTER); - rte_pci_write_config(device, ®, sizeof(reg), PCI_COMMAND); + return rte_pci_write_config(device, ®, sizeof(reg), PCI_COMMAND); } /** @@ -94,16 +84,34 @@ hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap) uint8_t pos; uint8_t id; int ttl; + int ret; + + ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS); + return 0; + } - rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS); if (!(status & PCI_STATUS_CAP_LIST)) return 0; ttl = MAX_PCIE_CAPABILITY; - rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST); + ret = rte_pci_read_config(device, &pos, sizeof(pos), + PCI_CAPABILITY_LIST); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_CAPABILITY_LIST); + return 0; + } + while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) { - rte_pci_read_config(device, &id, sizeof(id), - (pos + PCI_CAP_LIST_ID)); + ret = rte_pci_read_config(device, &id, sizeof(id), + (pos + PCI_CAP_LIST_ID)); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", + (pos + PCI_CAP_LIST_ID)); + break; + } if (id == 0xFF) break; @@ -111,8 +119,13 @@ hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap) if (id == cap) return (int)pos; - rte_pci_read_config(device, &pos, sizeof(pos), - (pos + PCI_CAP_LIST_NEXT)); + ret = rte_pci_read_config(device, &pos, sizeof(pos), + (pos + PCI_CAP_LIST_NEXT)); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", + (pos + PCI_CAP_LIST_NEXT)); + break; + } } return 0; } @@ -122,19 +135,33 @@ hns3vf_enable_msix(const struct rte_pci_device *device, bool op) { uint16_t control; int pos; + int ret; pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX); if (pos) { - 
rte_pci_read_config(device, &control, sizeof(control), + ret = rte_pci_read_config(device, &control, sizeof(control), (pos + PCI_MSIX_FLAGS)); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", + (pos + PCI_MSIX_FLAGS)); + return -ENXIO; + } + if (op) control |= PCI_MSIX_FLAGS_ENABLE; else control &= ~PCI_MSIX_FLAGS_ENABLE; - rte_pci_write_config(device, &control, sizeof(control), - (pos + PCI_MSIX_FLAGS)); + ret = rte_pci_write_config(device, &control, sizeof(control), + (pos + PCI_MSIX_FLAGS)); + if (ret < 0) { + PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x", + (pos + PCI_MSIX_FLAGS)); + return -ENXIO; + } + return 0; } + return -ENXIO; } @@ -149,7 +176,7 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, NULL, 0); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "failed to add uc mac addr(%s), ret = %d", mac_str, ret); @@ -169,7 +196,7 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, NULL, 0); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "failed to add uc mac addr(%s), ret = %d", mac_str, ret); @@ -189,7 +216,7 @@ hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) addr = &hw->mc_addrs[i]; /* Check if there are duplicate addresses */ if (rte_is_same_ether_addr(addr, mac_addr)) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, "failed to add mc mac addr, same addrs" "(%s) is added by the set_mc_mac_addr_list " @@ -200,7 +227,7 @@ hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) ret = hns3vf_add_mc_mac_addr(hw, mac_addr); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", mac_str, ret); @@ -235,7 +262,7 @@ hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, rte_spinlock_unlock(&hw->lock); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, ret); @@ -262,7 +289,7 @@ hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) rte_spinlock_unlock(&hw->lock); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, ret); @@ -303,12 +330,12 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, * -EPREM to VF driver through mailbox. 
*/ if (ret == -EPERM) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, old_addr); hns3_warn(hw, "Has permanet mac addr(%s) for vf", mac_str); } else { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str, ret); @@ -345,7 +372,7 @@ hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del) if (ret) { err = ret; - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, "failed to %s mac addr(%s) index:%d " "ret = %d.", del ? "remove" : "restore", @@ -367,7 +394,7 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, NULL, 0); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d", mac_str, ret); @@ -388,7 +415,7 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, NULL, 0); if (ret) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d", mac_str, ret); @@ -408,7 +435,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, uint32_t j; if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { - hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " "invalid. valid range: 0~%d", nb_mc_addr, HNS3_MC_MACADDR_NUM); return -EINVAL; @@ -418,7 +445,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, for (i = 0; i < nb_mc_addr; i++) { addr = &mc_addr_set[i]; if (!rte_is_multicast_ether_addr(addr)) { - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, "failed to set mc mac addr, addr(%s) invalid.", @@ -429,7 +456,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, /* Check if there are duplicate addresses */ for (j = i + 1; j < nb_mc_addr; j++) { if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { - rte_ether_format_addr(mac_str, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, "failed to set mc mac addr, " @@ -446,7 +473,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) { if (rte_is_same_ether_addr(addr, &hw->data->mac_addrs[j])) { - rte_ether_format_addr(mac_str, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, "failed to set mc mac addr, " @@ -529,7 +556,7 @@ hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) ret = hns3vf_add_mc_mac_addr(hw, addr); if (ret) { err = ret; - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d", del ? "Remove" : "Restore", mac_str, ret); @@ -574,6 +601,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, req->msg[1] = en_bc_pmc ? 1 : 0; req->msg[2] = en_uc_pmc ? 1 : 0; req->msg[3] = en_mc_pmc ? 1 : 0; + req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 
1 : 0; ret = hns3_cmd_send(hw, &desc, 1); if (ret) @@ -684,7 +712,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, sizeof(bind_msg), false, NULL, 0); if (ret) - hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.", + hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", op_str, queue_id, bind_msg.vector_id, ret); return ret; @@ -693,7 +721,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, static int hns3vf_init_ring_with_vector(struct hns3_hw *hw) { - uint8_t vec; + uint16_t vec; int ret; int i; @@ -704,33 +732,33 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw) * vector. In the initialization clearing the all hardware mapping * relationship configurations between queues and interrupt vectors is * needed, so some error caused by the residual configurations, such as - * the unexpected Tx interrupt, can be avoid. Because of the hardware - * constraints in hns3 hardware engine, we have to implement clearing - * the mapping relationship configurations by binding all queues to the - * last interrupt vector and reserving the last interrupt vector. This - * method results in a decrease of the maximum queues when upper - * applications call the rte_eth_dev_configure API function to enable - * Rx interrupt. + * the unexpected Tx interrupt, can be avoid. */ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ - /* vec - 1: the last interrupt is reserved */ - hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1; + if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE) + vec = vec - 1; /* the last interrupt is reserved */ + hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); for (i = 0; i < hw->intr_tqps_num; i++) { /* - * Set gap limiter and rate limiter configuration of queue's - * interrupt. + * Set gap limiter/rate limiter/quanity limiter algorithm + * configuration for interrupt coalesce of queue's interrupt. */ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, HNS3_TQP_INTR_GL_DEFAULT); hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, HNS3_TQP_INTR_GL_DEFAULT); hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); + /* + * QL(quantity limiter) is not used currently, just set 0 to + * close it. 
+ */ + hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT); ret = hns3vf_bind_ring_with_vector(hw, vec, false, HNS3_RING_TYPE_TX, i); if (ret) { PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with " - "vector: %d, ret=%d", i, vec, ret); + "vector: %u, ret=%d", i, vec, ret); return ret; } @@ -738,7 +766,7 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw) HNS3_RING_TYPE_RX, i); if (ret) { PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with " - "vector: %d, ret=%d", i, vec, ret); + "vector: %u, ret=%d", i, vec, ret); return ret; } } @@ -749,30 +777,34 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw) static int hns3vf_dev_configure(struct rte_eth_dev *dev) { - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct hns3_rss_conf *rss_cfg = &hw->rss_info; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; struct rte_eth_conf *conf = &dev->data->dev_conf; enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; struct rte_eth_rss_conf rss_conf; + uint32_t max_rx_pkt_len; uint16_t mtu; bool gro_en; int ret; + hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); + /* - * Hardware does not support individually enable/disable/reset the Tx or - * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx - * and Rx queues at the same time. When the numbers of Tx queues - * allocated by upper applications are not equal to the numbers of Rx - * queues, driver needs to setup fake Tx or Rx queues to adjust numbers - * of Tx/Rx queues. otherwise, network engine can not work as usual. But - * these fake queues are imperceptible, and can not be used by upper - * applications. + * Some versions of hardware network engine does not support + * individually enable/disable/reset the Tx or Rx queue. These devices + * must enable/disable/reset Tx and Rx queues at the same time. When the + * numbers of Tx queues allocated by upper applications are not equal to + * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues + * to adjust numbers of Tx/Rx queues. otherwise, network engine can not + * work as usual. But these fake queues are imperceptible, and can not + * be used by upper applications. */ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); if (ret) { - hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret); + hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret); + hw->cfg_max_queues = 0; return ret; } @@ -786,12 +818,8 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) /* When RSS is not configured, redirect the packet queue 0 */ if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + hw->rss_dis_flag = false; rss_conf = conf->rx_adv_conf.rss_conf; - if (rss_conf.rss_key == NULL) { - rss_conf.rss_key = rss_cfg->key; - rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE; - } - ret = hns3_dev_rss_hash_update(dev, &rss_conf); if (ret) goto cfg_err; @@ -802,12 +830,18 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) * according to the maximum RX packet length. */ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - /* - * Security of max_rx_pkt_len is guaranteed in dpdk frame. - * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it - * can safely assign to "uint16_t" type variable. 
- */ - mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); + max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; + if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || + max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { + hns3_err(hw, "maximum Rx packet length must be greater " + "than %u and less than %u when jumbo frame enabled.", + (uint16_t)HNS3_DEFAULT_FRAME_LEN, + (uint16_t)HNS3_MAX_FRAME_LEN); + ret = -EINVAL; + goto cfg_err; + } + + mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); ret = hns3vf_dev_mtu_set(dev, mtu); if (ret) goto cfg_err; @@ -824,10 +858,13 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) if (ret) goto cfg_err; + hns3_init_rx_ptype_tble(dev); + hw->adapter_state = HNS3_NIC_CONFIGURED; return 0; cfg_err: + hw->cfg_max_queues = 0; (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); hw->adapter_state = HNS3_NIC_INITIALIZED; @@ -862,18 +899,37 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * MTU value issued by hns3 VF PMD driver must be less than or equal to * PF's MTU. */ - if (rte_atomic16_read(&hw->reset.resetting)) { + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { hns3_err(hw, "Failed to set mtu during resetting"); return -EIO; } + /* + * when Rx of scattered packets is off, we have some possibility of + * using vector Rx process function or simple Rx functions in hns3 PMD + * driver. If the input MTU is increased and the maximum length of + * received packets is greater than the length of a buffer for Rx + * packet, the hardware network engine needs to use multiple BDs and + * buffers to store these packets. This will cause problems when still + * using vector Rx process function or simple Rx function to receiving + * packets. So, when Rx of scattered packets is off and device is + * started, it is not permitted to increase MTU so that the maximum + * length of Rx packets is greater than Rx buffer length. 
+ */ + if (dev->data->dev_started && !dev->data->scattered_rx && + frame_size > hw->rx_buf_len) { + hns3_err(hw, "failed to set mtu because current is " + "not scattered rx mode"); + return -EOPNOTSUPP; + } + rte_spinlock_lock(&hw->lock); ret = hns3vf_config_mtu(hw, mtu); if (ret) { rte_spinlock_unlock(&hw->lock); return ret; } - if (frame_size > RTE_ETHER_MAX_LEN) + if (mtu > RTE_ETHER_MTU) dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else @@ -919,7 +975,6 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TCP_LRO); - info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | @@ -930,9 +985,16 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | - info->tx_queue_offload_capa | + DEV_TX_OFFLOAD_MBUF_FAST_FREE | hns3_txvlan_cap_get(hw)); + if (hns3_dev_outer_udp_cksum_supported(hw)) + info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + + if (hns3_dev_indep_txrx_supported(hw)) + info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + info->rx_desc_lim = (struct rte_eth_desc_lim) { .nb_max = HNS3_MAX_RING_DESC, .nb_min = HNS3_MIN_RING_DESC, @@ -944,23 +1006,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) .nb_min = HNS3_MIN_RING_DESC, .nb_align = HNS3_ALIGN_RING_DESC, .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT, - .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT, + .nb_mtu_seg_max = hw->max_non_tso_bd_num, }; info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH, /* * If there are no available Rx buffer descriptors, incoming * packets are always dropped by hardware based on hns3 network * engine. 
*/ .rx_drop_en = 1, + .offloads = 0, + }; + info->default_txconf = (struct rte_eth_txconf) { + .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH, + .offloads = 0, }; - info->vmdq_queue_num = 0; - - info->reta_size = HNS3_RSS_IND_TBL_SIZE; + info->reta_size = hw->rss_ind_tbl_size; info->hash_key_size = HNS3_RSS_KEY_SIZE; info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + + info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; + info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; + info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; + info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; @@ -996,12 +1067,11 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) /* Fetch the events from their corresponding regs */ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); - if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) { rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING); hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg); hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); val = hns3_read_dev(hw, HNS3_VF_RST_ING); hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); @@ -1041,14 +1111,13 @@ hns3vf_interrupt_handler(void *param) enum hns3vf_evt_cause event_cause; uint32_t clearval; - if (hw->irq_thread_id == 0) - hw->irq_thread_id = pthread_self(); - /* Disable interrupt */ hns3vf_disable_irq0(hw); /* Read out interrupt causes */ event_cause = hns3vf_check_event_cause(hns, &clearval); + /* Clear interrupt causes */ + hns3vf_clear_event_cause(hw, clearval); switch (event_cause) { case HNS3VF_VECTOR0_EVENT_RST: @@ -1061,13 +1130,128 @@ hns3vf_interrupt_handler(void *param) break; } - /* Clear interrupt causes */ - hns3vf_clear_event_cause(hw, clearval); - /* Enable interrupt */ hns3vf_enable_irq0(hw); } +static void +hns3vf_set_default_dev_specifications(struct hns3_hw *hw) +{ + hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; + hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; + hw->rss_key_size = HNS3_RSS_KEY_SIZE; + hw->intr.int_ql_max = HNS3_INTR_QL_NONE; +} + +static void +hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +{ + struct hns3_dev_specs_0_cmd *req0; + + req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; + + hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; + hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); + hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); + hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); +} + +static int +hns3vf_check_dev_specifications(struct hns3_hw *hw) +{ + if (hw->rss_ind_tbl_size == 0 || + hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { + hns3_warn(hw, "the size of hash lookup table configured (%u)" + " exceeds the maximum(%u)", hw->rss_ind_tbl_size, + HNS3_RSS_IND_TBL_SIZE_MAX); + return -EINVAL; + } + + return 0; +} + +static int +hns3vf_query_dev_specifications(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, + true); + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + hns3_cmd_setup_basic_desc(&desc[i], 
HNS3_OPC_QUERY_DEV_SPECS, true); + + ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hns3vf_parse_dev_specifications(hw, desc); + + return hns3vf_check_dev_specifications(hw); +} + +void +hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported) +{ + uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED : + HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; + uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); + + if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN) + __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); +} + +static void +hns3vf_get_push_lsc_cap(struct hns3_hw *hw) +{ +#define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS 500 + + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS; + uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; + uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); + + __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN, + __ATOMIC_RELEASE); + + (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, + NULL, 0); + + while (remain_ms > 0) { + rte_delay_ms(HNS3_POLL_RESPONE_MS); + if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != + HNS3_PF_PUSH_LSC_CAP_UNKNOWN) + break; + remain_ms--; + } + + /* + * When exit above loop, the pf_push_lsc_cap could be one of the three + * state: unknown (means pf not ack), not_supported, supported. + * Here config it as 'not_supported' when it's 'unknown' state. + */ + __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0, + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); + + if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) == + HNS3_PF_PUSH_LSC_CAP_SUPPORTED) { + hns3_info(hw, "detect PF support push link status change!"); + } else { + /* + * Framework already set RTE_ETH_DEV_INTR_LSC bit because driver + * declared RTE_PCI_DRV_INTR_LSC in drv_flags. So here cleared + * the RTE_ETH_DEV_INTR_LSC capability. 
+ */ + dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; + } +} + static int hns3vf_get_capability(struct hns3_hw *hw) { @@ -1083,31 +1267,61 @@ hns3vf_get_capability(struct hns3_hw *hw) ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, HNS3_PCI_REVISION_ID); if (ret != HNS3_PCI_REVISION_ID_LEN) { - PMD_INIT_LOG(ERR, "failed to read pci revision id: %d", ret); + PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", + ret); return -EIO; } hw->revision = revision; + if (revision < PCI_REVISION_ID_HIP09_A) { + hns3vf_set_default_dev_specifications(hw); + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; + hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; + hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1; + hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE; + return 0; + } + + ret = hns3vf_query_dev_specifications(hw); + if (ret) { + PMD_INIT_LOG(ERR, + "failed to query dev specifications, ret = %d", + ret); + return ret; + } + + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; + hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; + hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; + hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; + return 0; } static int hns3vf_check_tqp_info(struct hns3_hw *hw) { - uint16_t tqps_num; + if (hw->tqps_num == 0) { + PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF."); + return -EINVAL; + } - tqps_num = hw->tqps_num; - if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) { - PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. 
valid " - "range: 1~%d", - tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); + if (hw->rss_size_max == 0) { + PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF."); return -EINVAL; } - hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num); + hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num); return 0; } + static int hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) { @@ -1193,21 +1407,47 @@ hns3vf_get_queue_depth(struct hns3_hw *hw) return 0; } +static void +hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps) +{ + if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B)) + hns3_set_bit(hw->capability, + HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1); +} + static int -hns3vf_get_tc_info(struct hns3_hw *hw) +hns3vf_get_num_tc(struct hns3_hw *hw) { - uint8_t resp_msg; + uint8_t num_tc = 0; + uint32_t i; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + if (hw->hw_tc_map & BIT(i)) + num_tc++; + } + return num_tc; +} + +static int +hns3vf_get_basic_info(struct hns3_hw *hw) +{ + uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; + struct hns3_basic_info *basic_info; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0, - true, &resp_msg, sizeof(resp_msg)); + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, + true, resp_msg, sizeof(resp_msg)); if (ret) { - hns3_err(hw, "VF request to get TC info from PF failed %d", - ret); + hns3_err(hw, "failed to get basic info from PF, ret = %d.", + ret); return ret; } - hw->hw_tc_map = resp_msg; + basic_info = (struct hns3_basic_info *)resp_msg; + hw->hw_tc_map = basic_info->hw_tc_map; + hw->num_tc = hns3vf_get_num_tc(hw); + hw->pf_vf_if_version = basic_info->pf_vf_if_version; + hns3vf_update_caps(hw, basic_info->caps); return 0; } @@ -1245,6 +1485,13 @@ hns3vf_get_configuration(struct hns3_hw *hw) return ret; } + hns3vf_get_push_lsc_cap(hw); + + /* Get basic info from PF */ + ret = hns3vf_get_basic_info(hw); + if (ret) + return ret; + /* Get queue configuration from PF */ ret = hns3vf_get_queue_info(hw); if (ret) @@ -1260,57 +1507,73 @@ hns3vf_get_configuration(struct hns3_hw *hw) if (ret) return ret; - ret = hns3vf_get_port_base_vlan_filter_state(hw); - if (ret) - return ret; - - /* Get tc configuration from PF */ - return hns3vf_get_tc_info(hw); + return hns3vf_get_port_base_vlan_filter_state(hw); } static int -hns3vf_set_tc_info(struct hns3_adapter *hns) +hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q, + uint16_t nb_tx_q) { struct hns3_hw *hw = &hns->hw; - uint16_t nb_rx_q = hw->data->nb_rx_queues; - uint16_t nb_tx_q = hw->data->nb_tx_queues; - uint8_t i; - hw->num_tc = 0; - for (i = 0; i < HNS3_MAX_TC_NUM; i++) - if (hw->hw_tc_map & BIT(i)) - hw->num_tc++; + return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q); +} - if (nb_rx_q < hw->num_tc) { - hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).", - nb_rx_q, hw->num_tc); - return -EINVAL; - } +static void +hns3vf_request_link_info(struct hns3_hw *hw) +{ + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); + bool send_req; + int ret; - if (nb_tx_q < hw->num_tc) { - hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).", - nb_tx_q, hw->num_tc); - return -EINVAL; - } + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) + return; - hns3_set_rss_size(hw, nb_rx_q); - hns3_tc_queue_mapping_cfg(hw, nb_tx_q); + send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED || + vf->req_link_info_cnt > 0; + if (!send_req) + return; - return 0; + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, + NULL, 0); + if (ret) 
{ + hns3_err(hw, "failed to fetch link status, ret = %d", ret); + return; + } + + if (vf->req_link_info_cnt > 0) + vf->req_link_info_cnt--; } -static void -hns3vf_request_link_info(struct hns3_hw *hw) +void +hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex) { - uint8_t resp_msg; + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); + struct hns3_mac *mac = &hw->mac; int ret; - if (rte_atomic16_read(&hw->reset.resetting)) + /* + * PF kernel driver may push link status when VF driver is in resetting, + * driver will stop polling job in this case, after resetting done + * driver will start polling job again. + * When polling job started, driver will get initial link status by + * sending request to PF kernel driver, then could update link status by + * process PF kernel driver's link status mailbox message. + */ + if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED)) return; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, - &resp_msg, sizeof(resp_msg)); - if (ret) - hns3_err(hw, "Failed to fetch link status from PF: %d", ret); + + if (hw->adapter_state != HNS3_NIC_STARTED) + return; + + mac->link_status = link_status; + mac->link_speed = link_speed; + mac->link_duplex = link_duplex; + ret = hns3vf_dev_link_update(dev, 0); + if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0) + hns3_start_report_lse(dev); } static int @@ -1338,7 +1601,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) struct hns3_hw *hw = &hns->hw; int ret; - if (rte_atomic16_read(&hw->reset.resetting)) { + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { hns3_err(hw, "vf set vlan id failed during resetting, vlan_id =%u", vlan_id); @@ -1354,6 +1617,26 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) return ret; } +static int +hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) +{ + uint8_t msg_data; + int ret; + + if (!hns3_dev_vf_vlan_flt_supported(hw)) + return 0; + + msg_data = enable ? 1 : 0; + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, + HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, + sizeof(msg_data), true, NULL, 0); + if (ret) + hns3_err(hw, "%s vlan filter failed, ret = %d.", + enable ? "enable" : "disable", ret); + + return ret; +} + static int hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) { @@ -1364,7 +1647,8 @@ hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, &msg_data, sizeof(msg_data), false, NULL, 0); if (ret) - hns3_err(hw, "vf enable strip failed, ret =%d", ret); + hns3_err(hw, "vf %s strip failed, ret = %d.", + enable ? 
"enable" : "disable", ret); return ret; } @@ -1377,13 +1661,26 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask) unsigned int tmp_mask; int ret = 0; - if (rte_atomic16_read(&hw->reset.resetting)) { + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { hns3_err(hw, "vf set vlan offload failed during resetting, " "mask = 0x%x", mask); return -EIO; } tmp_mask = (unsigned int)mask; + + if (tmp_mask & ETH_VLAN_FILTER_MASK) { + rte_spinlock_lock(&hw->lock); + /* Enable or disable VLAN filter */ + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + ret = hns3vf_en_vlan_filter(hw, true); + else + ret = hns3vf_en_vlan_filter(hw, false); + rte_spinlock_unlock(&hw->lock); + if (ret) + return ret; + } + /* Vlan stripping setting */ if (tmp_mask & ETH_VLAN_STRIP_MASK) { rte_spinlock_lock(&hw->lock); @@ -1481,9 +1778,10 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) } /* Apply vlan offload setting */ - ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK); if (ret) - hns3_err(hw, "dev config vlan offload failed, ret =%d", ret); + hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret); return ret; } @@ -1504,11 +1802,10 @@ hns3vf_keep_alive_handler(void *param) struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - uint8_t respmsg; int ret; ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, - false, &respmsg, sizeof(uint8_t)); + false, NULL, 0); if (ret) hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", ret); @@ -1526,8 +1823,8 @@ hns3vf_service_handler(void *param) /* * The query link status and reset processing are executed in the - * interrupt thread.When the IMP reset occurs, IMP will not respond, - * and the query operation will time out after 30ms. In the case of + * interrupt thread. When the IMP reset occurs, IMP will not respond, + * and the query operation will timeout after 30ms. In the case of * multiple PF/VFs, each query failure timeout causes the IMP reset * interrupt to fail to respond within 100ms. 
* Before querying the link status, check whether there is a reset @@ -1542,6 +1839,31 @@ hns3vf_service_handler(void *param) eth_dev); } +static void +hns3vf_start_poll_job(struct rte_eth_dev *dev) +{ +#define HNS3_REQUEST_LINK_INFO_REMAINS_CNT 3 + + struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED) + vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT; + + __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED); + + hns3vf_service_handler(dev); +} + +static void +hns3vf_stop_poll_job(struct rte_eth_dev *dev) +{ + struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + rte_eal_alarm_cancel(hns3vf_service_handler, dev); + + __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED); +} + static int hns3_query_vf_resource(struct hns3_hw *hw) { @@ -1559,7 +1881,7 @@ hns3_query_vf_resource(struct hns3_hw *hw) req = (struct hns3_vf_res_cmd *)desc.data; num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number), - HNS3_VEC_NUM_M, HNS3_VEC_NUM_S); + HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S); if (num_msi < HNS3_MIN_VECTOR_NUM) { hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)", num_msi, HNS3_MIN_VECTOR_NUM); @@ -1610,13 +1932,6 @@ hns3vf_init_hardware(struct hns3_adapter *hns) goto err_init_hardware; } - ret = hns3vf_set_alive(hw, true); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret); - goto err_init_hardware; - } - - hns3vf_request_link_info(hw); return 0; err_init_hardware: @@ -1659,6 +1974,8 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } + hns3_tx_push_init(eth_dev); + /* Get VF resource */ ret = hns3_query_vf_resource(hw); if (ret) @@ -1686,35 +2003,46 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) goto err_get_config; } - /* - * The hns3 PF ethdev driver in kernel support setting VF MAC address - * on the host by "ip link set ..." command. To avoid some incorrect - * scenes, for example, hns3 VF PMD driver fails to receive and send - * packets after user configure the MAC address by using the - * "ip link set ..." command, hns3 VF PMD driver keep the same MAC - * address strategy as the hns3 kernel ethdev driver in the - * initialization. If user configure a MAC address by the ip command - * for VF device, then hns3 VF PMD driver will start with it, otherwise - * start with a random MAC address in the initialization. - */ - ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr); + ret = hns3_tqp_stats_init(hw); if (ret) - rte_eth_random_addr(hw->mac.mac_addr); + goto err_get_config; + + /* Hardware statistics of imissed registers cleared. 
*/ + ret = hns3_update_imissed_stats(hw, true); + if (ret) { + hns3_err(hw, "clear imissed stats failed, ret = %d", ret); + goto err_set_tc_queue; + } + + ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num); + if (ret) { + PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret); + goto err_set_tc_queue; + } ret = hns3vf_clear_vport_list(hw); if (ret) { PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret); - goto err_get_config; + goto err_set_tc_queue; } ret = hns3vf_init_hardware(hns); if (ret) - goto err_get_config; + goto err_set_tc_queue; + + hns3_rss_set_default_args(hw); - hns3_set_default_rss_args(hw); + ret = hns3vf_set_alive(hw, true); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret); + goto err_set_tc_queue; + } return 0; +err_set_tc_queue: + hns3_tqp_stats_uninit(hw); + err_get_config: hns3vf_disable_irq0(hw); rte_intr_disable(&pci_dev->intr_handle); @@ -1743,6 +2071,8 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) (void)hns3_config_gro(hw, false); (void)hns3vf_set_alive(hw, false); (void)hns3vf_set_promisc_mode(hw, false, false, false); + hns3_flow_uninit(eth_dev); + hns3_tqp_stats_uninit(hw); hns3vf_disable_irq0(hw); rte_intr_disable(&pci_dev->intr_handle); hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, @@ -1756,16 +2086,31 @@ static int hns3vf_do_stop(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; - bool reset_queue; + int ret; hw->mac.link_status = ETH_LINK_DOWN; - if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) { + /* + * The "hns3vf_do_stop" function will also be called by .stop_service to + * prepare reset. At the time of global or IMP reset, the command cannot + * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be + * accessed during the reset process. So the mbuf can not be released + * during reset and is required to be released after the reset is + * completed. + */ + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) + hns3_dev_release_mbufs(hns); + + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { hns3vf_configure_mac_addr(hns, true); - reset_queue = true; - } else - reset_queue = false; - return hns3_stop_queues(hns, reset_queue); + ret = hns3_reset_all_tqps(hns); + if (ret) { + hns3_err(hw, "failed to reset all queues ret = %d", + ret); + return ret; + } + } + return 0; } static void @@ -1803,13 +2148,14 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev) } } -static void +static int hns3vf_dev_stop(struct rte_eth_dev *dev) { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; PMD_INIT_FUNC_TRACE(); + dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; hns3_set_rxtx_function(dev); @@ -1817,30 +2163,35 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) /* Disable datapath on secondary process. */ hns3_mp_req_stop_rxtx(dev); /* Prevent crashes when queues are still in use. 
*/ - rte_delay_ms(hw->tqps_num); + rte_delay_ms(hw->cfg_max_queues); rte_spinlock_lock(&hw->lock); - if (rte_atomic16_read(&hw->reset.resetting) == 0) { + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { + hns3_stop_tqps(hw); hns3vf_do_stop(hns); hns3vf_unmap_rx_interrupt(dev); - hns3_dev_release_mbufs(hns); hw->adapter_state = HNS3_NIC_CONFIGURED; } - rte_eal_alarm_cancel(hns3vf_service_handler, dev); + hns3_rx_scattered_reset(dev); + hns3vf_stop_poll_job(dev); + hns3_stop_report_lse(dev); rte_spinlock_unlock(&hw->lock); + + return 0; } -static void +static int hns3vf_dev_close(struct rte_eth_dev *eth_dev) { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + int ret = 0; if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return; + return 0; if (hw->adapter_state == HNS3_NIC_STARTED) - hns3vf_dev_stop(eth_dev); + ret = hns3vf_dev_stop(eth_dev); hw->adapter_state = HNS3_NIC_CLOSING; hns3_reset_abort(hns); @@ -1851,10 +2202,10 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) hns3vf_uninit_vf(eth_dev); hns3_free_all_queues(eth_dev); rte_free(hw->reset.wait_data); - rte_free(eth_dev->process_private); - eth_dev->process_private = NULL; hns3_mp_uninit_primary(); - hns3_warn(hw, "Close port %d finished", hw->data->port_id); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); + + return ret; } static int @@ -1875,8 +2226,11 @@ hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, HNS3_FW_VERSION_BYTE1_S), hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, HNS3_FW_VERSION_BYTE0_S)); + if (ret < 0) + return -EINVAL; + ret += 1; /* add the size of '\0' */ - if (fw_size < (uint32_t)ret) + if (fw_size < (size_t)ret) return ret; else return 0; @@ -1902,13 +2256,18 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, case ETH_SPEED_NUM_50G: case ETH_SPEED_NUM_100G: case ETH_SPEED_NUM_200G: - new_link.link_speed = mac->link_speed; + if (mac->link_status) + new_link.link_speed = mac->link_speed; break; default: - new_link.link_speed = ETH_SPEED_NUM_100M; + if (mac->link_status) + new_link.link_speed = ETH_SPEED_NUM_UNKNOWN; break; } + if (!mac->link_status) + new_link.link_speed = ETH_SPEED_NUM_NONE; + new_link.link_duplex = mac->link_duplex; new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; new_link.link_autoneg = @@ -1921,15 +2280,19 @@ static int hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) { struct hns3_hw *hw = &hns->hw; + uint16_t nb_rx_q = hw->data->nb_rx_queues; + uint16_t nb_tx_q = hw->data->nb_tx_queues; int ret; - ret = hns3vf_set_tc_info(hns); + ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q); if (ret) return ret; - ret = hns3_start_queues(hns, reset_queue); + hns3_enable_rxd_adv_layout(hw); + + ret = hns3_init_queues(hns, reset_queue); if (ret) - hns3_err(hw, "Failed to start queues: %d", ret); + hns3_err(hw, "failed to init queues, ret = %d.", ret); return ret; } @@ -1946,26 +2309,27 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) uint16_t q_id; int ret; - if (dev->data->dev_conf.intr_conf.rxq == 0) + /* + * hns3 needs a separate interrupt to be used as event interrupt which + * could not be shared with task queue pair, so KERNEL drivers need + * support multiple interrupt vectors. 
+ */ + if (dev->data->dev_conf.intr_conf.rxq == 0 || + !rte_intr_cap_multiple(intr_handle)) return 0; - /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); + intr_vector = hw->used_rx_queues; + /* It creates event fd for each intr vector when MSIX is used */ + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL; - /* check and configure queue intr-vector mapping */ - if (rte_intr_cap_multiple(intr_handle) || - !RTE_ETH_DEV_SRIOV(dev).active) { - intr_vector = hw->used_rx_queues; - /* It creates event fd for each intr vector when MSIX is used */ - if (rte_intr_efd_enable(intr_handle, intr_vector)) - return -EINVAL; - } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + if (intr_handle->intr_vec == NULL) { intr_handle->intr_vec = rte_zmalloc("intr_vec", hw->used_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { - hns3_err(hw, "Failed to allocate %d rx_queues" + hns3_err(hw, "Failed to allocate %u rx_queues" " intr_vec", hw->used_rx_queues); ret = -ENOMEM; goto vf_alloc_intr_vec_error; @@ -1976,28 +2340,26 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) vec = RTE_INTR_VEC_RXTX_OFFSET; base = RTE_INTR_VEC_RXTX_OFFSET; } - if (rte_intr_dp_is_en(intr_handle)) { - for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { - ret = hns3vf_bind_ring_with_vector(hw, vec, true, - HNS3_RING_TYPE_RX, - q_id); - if (ret) - goto vf_bind_vector_error; - intr_handle->intr_vec[q_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3vf_bind_ring_with_vector(hw, vec, true, + HNS3_RING_TYPE_RX, q_id); + if (ret) + goto vf_bind_vector_error; + intr_handle->intr_vec[q_id] = vec; + /* + * If there are not enough efds (e.g. not enough interrupt), + * remaining queues will be bond to the last interrupt. + */ + if (vec < base + intr_handle->nb_efd - 1) + vec++; } rte_intr_enable(intr_handle); return 0; vf_bind_vector_error: - rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec) { - free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } - return ret; + free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; vf_alloc_intr_vec_error: rte_intr_efd_disable(intr_handle); return ret; @@ -2042,7 +2404,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) int ret; PMD_INIT_FUNC_TRACE(); - if (rte_atomic16_read(&hw->reset.resetting)) + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) return -EBUSY; rte_spinlock_lock(&hw->lock); @@ -2054,27 +2416,53 @@ hns3vf_dev_start(struct rte_eth_dev *dev) return ret; } ret = hns3vf_map_rx_interrupt(dev); - if (ret) { - hw->adapter_state = HNS3_NIC_CONFIGURED; - rte_spinlock_unlock(&hw->lock); - return ret; - } + if (ret) + goto map_rx_inter_err; + + /* + * There are three register used to control the status of a TQP + * (contains a pair of Tx queue and Rx queue) in the new version network + * engine. One is used to control the enabling of Tx queue, the other is + * used to control the enabling of Rx queue, and the last is the master + * switch used to control the enabling of the tqp. The Tx register and + * TQP register must be enabled at the same time to enable a Tx queue. + * The same applies to the Rx queue. For the older network enginem, this + * function only refresh the enabled flag, and it is used to update the + * status of queue in the dpdk framework. 
+ */ + ret = hns3_start_all_txqs(dev); + if (ret) + goto map_rx_inter_err; + + ret = hns3_start_all_rxqs(dev); + if (ret) + goto start_all_rxqs_fail; + hw->adapter_state = HNS3_NIC_STARTED; rte_spinlock_unlock(&hw->lock); + hns3_rx_scattered_calc(dev); hns3_set_rxtx_function(dev); hns3_mp_req_start_rxtx(dev); - rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev); hns3vf_restore_filter(dev); /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); - /* - * When finished the initialization, enable queues to receive/transmit - * packets. - */ - hns3_enable_all_queues(hw, true); + hns3_start_tqps(hw); + + if (dev->data->dev_conf.intr_conf.lsc != 0) + hns3vf_dev_link_update(dev, 0); + hns3vf_start_poll_job(dev); + + return ret; + +start_all_rxqs_fail: + hns3_stop_all_txqs(dev); +map_rx_inter_err: + (void)hns3vf_do_stop(hns); + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); return ret; } @@ -2108,9 +2496,25 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) struct hns3_hw *hw = &hns->hw; enum hns3_reset_level reset; + /* + * According to the protocol of PCIe, FLR to a PF device resets the PF + * state as well as the SR-IOV extended capability including VF Enable + * which means that VFs no longer exist. + * + * HNS3_VF_FULL_RESET means PF device is in FLR reset. when PF device + * is in FLR stage, the register state of VF device is not reliable, + * so register states detection can not be carried out. In this case, + * we just ignore the register states and return false to indicate that + * there are no other reset states that need to be processed by driver. + */ + if (hw->reset.level == HNS3_VF_FULL_RESET) + return false; + + /* Check the registers to confirm whether there is reset pending */ hns3vf_check_event_cause(hns, NULL); reset = hns3vf_get_reset_level(hw, &hw->reset.pending); - if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { + if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && + hw->reset.level < reset) { hns3_warn(hw, "High level reset %d is pending", reset); return true; } @@ -2148,7 +2552,7 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete"); return -EAGAIN; } else if (wait_data->result == HNS3_WAIT_TIMEOUT) { - gettimeofday(&tv, NULL); + hns3_clock_gettime(&tv); hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", tv.tv_sec, tv.tv_usec); return -ETIME; @@ -2158,7 +2562,7 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) wait_data->hns = hns; wait_data->check_completion = is_vf_reset_done; wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT * - HNS3VF_RESET_WAIT_MS + get_timeofday_ms(); + HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms(); wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC; wait_data->count = HNS3VF_RESET_WAIT_CNT; wait_data->result = HNS3_WAIT_REQUEST; @@ -2170,15 +2574,17 @@ static int hns3vf_prepare_reset(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; - int ret = 0; + int ret; if (hw->reset.level == HNS3_VF_FUNC_RESET) { ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, 0, true, NULL, 0); + if (ret) + return ret; } - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); - return ret; + return 0; } static int @@ -2188,19 +2594,27 @@ hns3vf_stop_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev; eth_dev = &rte_eth_devices[hw->data->port_id]; 
- if (hw->adapter_state == HNS3_NIC_STARTED) - rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev); + if (hw->adapter_state == HNS3_NIC_STARTED) { + /* + * Make sure call update link status before hns3vf_stop_poll_job + * because update link status depend on polling job exist. + */ + hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed, + hw->mac.link_duplex); + hns3vf_stop_poll_job(eth_dev); + } hw->mac.link_status = ETH_LINK_DOWN; hns3_set_rxtx_function(eth_dev); rte_wmb(); /* Disable datapath on secondary process. */ hns3_mp_req_stop_rxtx(eth_dev); - rte_delay_ms(hw->tqps_num); + rte_delay_ms(hw->cfg_max_queues); rte_spinlock_lock(&hw->lock); if (hw->adapter_state == HNS3_NIC_STARTED || hw->adapter_state == HNS3_NIC_STOPPING) { + hns3_enable_all_queues(hw, false); hns3vf_do_stop(hns); hw->reset.mbuf_deferred_free = true; } else @@ -2211,7 +2625,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) * from table space. Hence, for function reset software intervention is * required to delete the entries. */ - if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) hns3vf_configure_all_mc_mac_addr(hns, true); rte_spinlock_unlock(&hw->lock); @@ -2228,10 +2642,15 @@ hns3vf_start_service(struct hns3_adapter *hns) hns3_set_rxtx_function(eth_dev); hns3_mp_req_start_rxtx(eth_dev); if (hw->adapter_state == HNS3_NIC_STARTED) { - hns3vf_service_handler(eth_dev); + hns3vf_start_poll_job(eth_dev); /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); + /* + * Enable state of each rxq and txq will be recovered after + * reset, so we need to restore them before enable all tqps; + */ + hns3_restore_tqp_enable_state(hw); /* * When finished the initialization, enable queues to receive * and transmit packets. @@ -2272,7 +2691,7 @@ hns3vf_check_default_mac_change(struct hns3_hw *hw) ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac); if (!ret) { rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]); - rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, &hw->data->mac_addrs[0]); hns3_warn(hw, "Default MAC address has been changed to:" " %s by the host PF kernel ethdev driver", @@ -2328,6 +2747,13 @@ hns3vf_restore_conf(struct hns3_adapter *hns) hns3_info(hw, "hns3vf dev restart successful!"); } else if (hw->adapter_state == HNS3_NIC_STOPPING) hw->adapter_state = HNS3_NIC_CONFIGURED; + + ret = hns3vf_set_alive(hw, true); + if (ret) { + hns3_err(hw, "failed to VF send alive to PF: %d", ret); + goto err_vlan_table; + } + return 0; err_vlan_table: @@ -2378,8 +2804,10 @@ hns3vf_reset_service(void *param) * The interrupt may have been lost. It is necessary to handle * the interrupt to recover from the error. 
*/ - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) { - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_DEFERRED) { + __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, + __ATOMIC_RELAXED); hns3_err(hw, "Handling interrupts in delayed tasks"); hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]); reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); @@ -2388,7 +2816,7 @@ hns3vf_reset_service(void *param) hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); } } - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE); + __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); /* * Hardware reset has been notified, we now have to poll & check if @@ -2396,14 +2824,13 @@ hns3vf_reset_service(void *param) */ reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); if (reset_level != HNS3_NONE_RESET) { - gettimeofday(&tv_start, NULL); + hns3_clock_gettime(&tv_start); hns3_reset_process(hns, reset_level); - gettimeofday(&tv, NULL); + hns3_clock_gettime(&tv); timersub(&tv, &tv_start, &tv_delta); - msec = tv_delta.tv_sec * MSEC_PER_SEC + - tv_delta.tv_usec / USEC_PER_MSEC; + msec = hns3_clock_calctime_ms(&tv_delta); if (msec > HNS3_RESET_PROCESS_MS) - hns3_err(hw, "%d handle long time delta %" PRIx64 + hns3_err(hw, "%d handle long time delta %" PRIu64 " ms time=%ld.%.6ld", hw->reset.level, msec, tv.tv_sec, tv.tv_usec); } @@ -2419,7 +2846,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) if (hw->reset.level == HNS3_VF_FULL_RESET) { rte_intr_disable(&pci_dev->intr_handle); - hns3vf_set_bus_master(pci_dev, true); + ret = hns3vf_set_bus_master(pci_dev, true); + if (ret < 0) { + hns3_err(hw, "failed to set pci bus, ret = %d", ret); + return ret; + } } /* Firmware command initialize */ @@ -2443,7 +2874,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) rte_intr_enable(&pci_dev->intr_handle); } - ret = hns3_reset_all_queues(hns); + ret = hns3_reset_all_tqps(hns); if (ret) { hns3_err(hw, "Failed to reset all queues: %d", ret); return ret; @@ -2459,6 +2890,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) } static const struct eth_dev_ops hns3vf_eth_dev_ops = { + .dev_configure = hns3vf_dev_configure, .dev_start = hns3vf_dev_start, .dev_stop = hns3vf_dev_stop, .dev_close = hns3vf_dev_close, @@ -2480,11 +2912,16 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = { .tx_queue_setup = hns3_tx_queue_setup, .rx_queue_release = hns3_dev_rx_queue_release, .tx_queue_release = hns3_dev_tx_queue_release, + .rx_queue_start = hns3_dev_rx_queue_start, + .rx_queue_stop = hns3_dev_rx_queue_stop, + .tx_queue_start = hns3_dev_tx_queue_start, + .tx_queue_stop = hns3_dev_tx_queue_stop, .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, .rxq_info_get = hns3_rxq_info_get, .txq_info_get = hns3_txq_info_get, - .dev_configure = hns3vf_dev_configure, + .rx_burst_mode_get = hns3_rx_burst_mode_get, + .tx_burst_mode_get = hns3_tx_burst_mode_get, .mac_addr_add = hns3vf_add_mac_addr, .mac_addr_remove = hns3vf_remove_mac_addr, .mac_addr_set = hns3vf_set_default_mac_addr, @@ -2494,11 +2931,12 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = { .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, .reta_update = hns3_dev_rss_reta_update, .reta_query = hns3_dev_rss_reta_query, - .filter_ctrl = hns3_dev_filter_ctrl, + .flow_ops_get = hns3_dev_flow_ops_get, .vlan_filter_set = hns3vf_vlan_filter_set, .vlan_offload_set = 
hns3vf_vlan_offload_set, .get_reg = hns3_get_regs, .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, + .tx_done_cleanup = hns3_tx_done_cleanup, }; static const struct hns3_reset_ops hns3vf_reset_ops = { @@ -2520,20 +2958,11 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); - eth_dev->process_private = (struct hns3_process_private *) - rte_zmalloc_socket("hns3_filter_list", - sizeof(struct hns3_process_private), - RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); - if (eth_dev->process_private == NULL) { - PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); - return -ENOMEM; - } - - /* initialize flow filter lists */ - hns3_filterlist_init(eth_dev); + hns3_flow_init(eth_dev); hns3_set_rxtx_function(eth_dev); eth_dev->dev_ops = &hns3vf_eth_dev_ops; + eth_dev->rx_queue_count = hns3_rx_queue_count; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { ret = hns3_mp_init_secondary(); if (ret) { @@ -2541,8 +2970,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) "process, ret = %d", ret); goto err_mp_init_secondary; } - hw->secondary_cnt++; + hns3_tx_push_init(eth_dev); return 0; } @@ -2557,6 +2986,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) hw->adapter_state = HNS3_NIC_UNINITIALIZED; hns->is_vf = true; hw->data = eth_dev->data; + hns3_parse_devargs(eth_dev); ret = hns3_reset_init(hw); if (ret) @@ -2582,16 +3012,26 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) goto err_rte_zmalloc; } + /* + * The hns3 PF ethdev driver in kernel support setting VF MAC address + * on the host by "ip link set ..." command. To avoid some incorrect + * scenes, for example, hns3 VF PMD driver fails to receive and send + * packets after user configure the MAC address by using the + * "ip link set ..." command, hns3 VF PMD driver keep the same MAC + * address strategy as the hns3 kernel ethdev driver in the + * initialization. If user configure a MAC address by the ip command + * for VF device, then hns3 VF PMD driver will start with it, otherwise + * start with a random MAC address in the initialization. + */ + if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr)) + rte_eth_random_addr(hw->mac.mac_addr); rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, ð_dev->data->mac_addrs[0]); + hw->adapter_state = HNS3_NIC_INITIALIZED; - /* - * Pass the information to the rte_eth_dev_close() that it should also - * release the private port resources. 
- */ - eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_PENDING) { hns3_err(hw, "Reschedule reset service after dev_init"); hns3_schedule_reset(hns); } else { @@ -2615,10 +3055,10 @@ err_mp_init_primary: err_mp_init_secondary: eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; + eth_dev->rx_descriptor_status = NULL; eth_dev->tx_pkt_burst = NULL; eth_dev->tx_pkt_prepare = NULL; - rte_free(eth_dev->process_private); - eth_dev->process_private = NULL; + eth_dev->tx_descriptor_status = NULL; return ret; } @@ -2632,12 +3072,7 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; - - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; - eth_dev->tx_pkt_prepare = NULL; + return 0; if (hw->adapter_state < HNS3_NIC_CLOSING) hns3vf_dev_close(eth_dev); @@ -2664,12 +3099,12 @@ eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev) static const struct rte_pci_id pci_id_hns3vf_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) }, - { .vendor_id = 0, /* sentinel */ }, + { .vendor_id = 0, }, /* sentinel */ }; static struct rte_pci_driver rte_hns3vf_pmd = { .id_table = pci_id_hns3vf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, .probe = eth_hns3vf_pci_probe, .remove = eth_hns3vf_pci_remove, }; @@ -2677,3 +3112,7 @@ static struct rte_pci_driver rte_hns3vf_pmd = { RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map); RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf, + HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> ");
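
A minimal, self-contained sketch (not part of the patch) of the error-checked PCI config-space access pattern that the hns3vf_set_bus_master()/hns3vf_enable_msix() hunks above adopt: every rte_pci_read_config()/rte_pci_write_config() return value is now checked and propagated instead of being silently ignored. The register offsets below are the standard PCI config-space values and fprintf() stands in for the driver's PMD_INIT_LOG(); both are illustrative assumptions, not the driver's own definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>

#define PCI_COMMAND		0x04	/* standard PCI command register offset */
#define PCI_COMMAND_MASTER	0x0004	/* bus-master enable bit */

static int
set_bus_master(const struct rte_pci_device *device, bool op)
{
	uint16_t reg;
	int ret;

	ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
	if (ret < 0) {
		fprintf(stderr, "failed to read PCI offset 0x%x: %d\n",
			PCI_COMMAND, ret);
		return ret;
	}

	if (op)
		reg |= PCI_COMMAND_MASTER;	/* enable bus mastering */
	else
		reg &= ~PCI_COMMAND_MASTER;	/* disable bus mastering */

	/* propagate the write status so callers can react to PCI errors */
	return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
}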
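
The patch also moves the driver's reset and polling flags from the rte_atomic16_* helpers to GCC __atomic builtins (hw->reset.resetting, hw->reset.disable_cmd, hw->reset.schedule, vf->poll_job_started). A small sketch of that conversion, assuming a plain uint16_t flag rather than the real hns3_hw layout:

#include <stdint.h>

struct reset_flags {
	uint16_t resetting;	/* plain integer, no rte_atomic16_t wrapper */
};

static inline int
reset_in_progress(const struct reset_flags *f)
{
	/* was: rte_atomic16_read(&hw->reset.resetting) */
	return __atomic_load_n(&f->resetting, __ATOMIC_RELAXED) != 0;
}

static inline void
set_resetting(struct reset_flags *f, uint16_t val)
{
	/* was: rte_atomic16_set(&hw->reset.resetting, val) */
	__atomic_store_n(&f->resetting, val, __ATOMIC_RELAXED);
}

Relaxed ordering is used here because, as in the hunks above, these flags are standalone state markers; the pf_push_lsc_cap handshake in the diff uses acquire/release ordering instead.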