1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
5 #include <linux/pci_regs.h>
7 #include <ethdev_pci.h>
12 #include "hns3_ethdev.h"
13 #include "hns3_common.h"
14 #include "hns3_logs.h"
15 #include "hns3_rxtx.h"
16 #include "hns3_regs.h"
17 #include "hns3_intr.h"
20 #include "hns3_flow.h"
22 #define HNS3VF_KEEP_ALIVE_INTERVAL 2000000 /* us */
23 #define HNS3VF_SERVICE_INTERVAL 1000000 /* us */
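/*
 * The keep-alive mailbox message is sent to the PF every 2 seconds and the
 * periodic service task (link status polling) runs every second; both
 * intervals above are microsecond values handed to rte_eal_alarm_set().
 */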
25 #define HNS3VF_RESET_WAIT_MS 20
26 #define HNS3VF_RESET_WAIT_CNT 2000
28 /* Reset related Registers */
29 #define HNS3_GLOBAL_RESET_BIT 0
30 #define HNS3_CORE_RESET_BIT 1
31 #define HNS3_IMP_RESET_BIT 2
32 #define HNS3_FUN_RST_ING_B 0
34 enum hns3vf_evt_cause {
35 HNS3VF_VECTOR0_EVENT_RST,
36 HNS3VF_VECTOR0_EVENT_MBX,
37 HNS3VF_VECTOR0_EVENT_OTHER,
40 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
42 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
43 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
45 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
46 struct rte_ether_addr *mac_addr);
47 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
48 struct rte_ether_addr *mac_addr);
49 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
50 __rte_unused int wait_to_complete);
52 /* set PCI bus mastering */
54 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
59 ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
61 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
67 /* set the master bit */
68 reg |= PCI_COMMAND_MASTER;
70 reg &= ~(PCI_COMMAND_MASTER);
72 return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
76 * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
77 * @cap: the capability
79 * Return the address of the given capability within the PCI capability list.
82 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
84 #define MAX_PCIE_CAPABILITY 48
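/*
 * PCI capabilities form a linked list in configuration space;
 * MAX_PCIE_CAPABILITY bounds the walk below so that a malformed or looping
 * capability list cannot stall the driver.
 */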
91 ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
93 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
97 if (!(status & PCI_STATUS_CAP_LIST))
100 ttl = MAX_PCIE_CAPABILITY;
101 ret = rte_pci_read_config(device, &pos, sizeof(pos),
102 PCI_CAPABILITY_LIST);
104 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
105 PCI_CAPABILITY_LIST);
109 while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
110 ret = rte_pci_read_config(device, &id, sizeof(id),
111 (pos + PCI_CAP_LIST_ID));
113 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
114 (pos + PCI_CAP_LIST_ID));
124 ret = rte_pci_read_config(device, &pos, sizeof(pos),
125 (pos + PCI_CAP_LIST_NEXT));
127 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
128 (pos + PCI_CAP_LIST_NEXT));
136 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
142 pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
144 ret = rte_pci_read_config(device, &control, sizeof(control),
145 (pos + PCI_MSIX_FLAGS));
147 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
148 (pos + PCI_MSIX_FLAGS));
153 control |= PCI_MSIX_FLAGS_ENABLE;
155 control &= ~PCI_MSIX_FLAGS_ENABLE;
156 ret = rte_pci_write_config(device, &control, sizeof(control),
157 (pos + PCI_MSIX_FLAGS));
159 PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
160 (pos + PCI_MSIX_FLAGS));
171 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
173 /* mac address was checked by upper level interface */
174 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
177 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
178 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
179 RTE_ETHER_ADDR_LEN, false, NULL, 0);
181 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
183 hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
190 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
192 /* mac address was checked by upper level interface */
193 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
196 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
197 HNS3_MBX_MAC_VLAN_UC_REMOVE,
198 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
201 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
203 hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
210 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
211 struct rte_ether_addr *mac_addr)
213 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
214 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
215 struct rte_ether_addr *old_addr;
216 uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
217 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
221 * It has been guaranteed that the input parameter named mac_addr is a
222 * valid address in the rte layer of the DPDK framework.
224 old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
225 rte_spinlock_lock(&hw->lock);
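/*
 * The HNS3_MBX_MAC_VLAN_UC_MODIFY payload assembled below carries the new
 * MAC address in the first six bytes and the current (old) address in the
 * next six; the PF presumably uses the old address to locate the unicast
 * entry being replaced.
 */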
226 memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
227 memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
230 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
231 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
232 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
235 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
236 * driver. When the user has configured a MAC address for the VF
237 * device with the "ip link set ..." command on the PF device, the hns3
238 * PF kernel ethdev driver does not allow the VF driver to request
239 * reconfiguring a different default MAC address, and returns
240 * -EPERM to the VF driver through the mailbox.
243 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
245 hns3_warn(hw, "Has permanent mac addr(%s) for vf",
248 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
250 hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
255 rte_ether_addr_copy(mac_addr,
256 (struct rte_ether_addr *)hw->mac.mac_addr);
257 rte_spinlock_unlock(&hw->lock);
263 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
264 struct rte_ether_addr *mac_addr)
266 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
269 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
270 HNS3_MBX_MAC_VLAN_MC_ADD,
271 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
274 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
276 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
284 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
285 struct rte_ether_addr *mac_addr)
287 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
290 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
291 HNS3_MBX_MAC_VLAN_MC_REMOVE,
292 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
295 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
297 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
305 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
306 bool en_uc_pmc, bool en_mc_pmc)
308 struct hns3_mbx_vf_to_pf_cmd *req;
309 struct hns3_cmd_desc desc;
312 req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
315 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
316 * so there are some features for promiscuous/allmulticast mode in hns3
317 * VF PMD driver as below:
318 * 1. The promiscuous/allmulticast mode can be configured successfully
319 * only based on the trusted VF device. If based on the non trusted
320 * VF device, configuring promiscuous/allmulticast mode will fail.
321 * The hns3 VF device can be configured as a trusted device by the hns3
322 * PF kernel ethdev driver on the host with the following command:
323 * "ip link set <eth num> vf <vf id> trust on"
324 * 2. After the promiscuous mode is configured successfully, the hns3 VF
325 * PMD driver can receive both ingress and outgoing traffic. In other
326 * words, all the ingress packets, and all the packets sent from the PF
327 * and other VFs on the same physical port.
328 * 3. Note: Because of hardware constraints, the VLAN filter is enabled
329 * by default and cannot be turned off on the VF device, so the VLAN
330 * filter is still effective even in promiscuous mode. If upper
331 * applications don't call the rte_eth_dev_vlan_filter API to set a
332 * VLAN on the VF device, the hns3 VF PMD driver can't receive
333 * packets with a VLAN tag in promiscuous mode.
335 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
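/*
 * Mailbox message layout used below: msg[0] is the opcode, msg[1]/msg[2]/
 * msg[3] enable broadcast/unicast/multicast promiscuity respectively, and
 * msg[4] indicates whether limited promiscuous mode is requested.
 */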
336 req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
337 req->msg[1] = en_bc_pmc ? 1 : 0;
338 req->msg[2] = en_uc_pmc ? 1 : 0;
339 req->msg[3] = en_mc_pmc ? 1 : 0;
340 req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
342 ret = hns3_cmd_send(hw, &desc, 1);
344 hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
350 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
352 struct hns3_adapter *hns = dev->data->dev_private;
353 struct hns3_hw *hw = &hns->hw;
356 ret = hns3vf_set_promisc_mode(hw, true, true, true);
358 hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
364 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
366 bool allmulti = dev->data->all_multicast ? true : false;
367 struct hns3_adapter *hns = dev->data->dev_private;
368 struct hns3_hw *hw = &hns->hw;
371 ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
373 hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
379 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
381 struct hns3_adapter *hns = dev->data->dev_private;
382 struct hns3_hw *hw = &hns->hw;
385 if (dev->data->promiscuous)
388 ret = hns3vf_set_promisc_mode(hw, true, false, true);
390 hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
396 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
398 struct hns3_adapter *hns = dev->data->dev_private;
399 struct hns3_hw *hw = &hns->hw;
402 if (dev->data->promiscuous)
405 ret = hns3vf_set_promisc_mode(hw, true, false, false);
407 hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
413 hns3vf_restore_promisc(struct hns3_adapter *hns)
415 struct hns3_hw *hw = &hns->hw;
416 bool allmulti = hw->data->all_multicast ? true : false;
418 if (hw->data->promiscuous)
419 return hns3vf_set_promisc_mode(hw, true, true, true);
421 return hns3vf_set_promisc_mode(hw, true, false, allmulti);
425 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
426 bool mmap, enum hns3_ring_type queue_type,
429 struct hns3_vf_bind_vector_msg bind_msg;
434 memset(&bind_msg, 0, sizeof(bind_msg));
435 code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
436 HNS3_MBX_UNMAP_RING_TO_VECTOR;
437 bind_msg.vector_id = (uint8_t)vector_id;
439 if (queue_type == HNS3_RING_TYPE_RX)
440 bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
442 bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
444 bind_msg.param[0].ring_type = queue_type;
445 bind_msg.ring_num = 1;
446 bind_msg.param[0].tqp_index = queue_id;
447 op_str = mmap ? "Map" : "Unmap";
448 ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
449 sizeof(bind_msg), false, NULL, 0);
451 hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
452 op_str, queue_id, bind_msg.vector_id, ret);
458 hns3vf_dev_configure(struct rte_eth_dev *dev)
460 struct hns3_adapter *hns = dev->data->dev_private;
461 struct hns3_hw *hw = &hns->hw;
462 struct rte_eth_conf *conf = &dev->data->dev_conf;
463 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
464 uint16_t nb_rx_q = dev->data->nb_rx_queues;
465 uint16_t nb_tx_q = dev->data->nb_tx_queues;
466 struct rte_eth_rss_conf rss_conf;
470 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
473 * Some versions of the hardware network engine do not support
474 * individually enabling/disabling/resetting a Tx or Rx queue. These
475 * devices must enable/disable/reset Tx and Rx queues at the same time.
476 * When the number of Tx queues allocated by upper applications is not
477 * equal to the number of Rx queues, the driver needs to set up fake Tx
478 * or Rx queues to adjust the numbers of Tx/Rx queues; otherwise, the
479 * network engine cannot work as usual. These fake queues are invisible
480 * to upper applications and cannot be used by them.
482 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
484 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
485 hw->cfg_max_queues = 0;
489 hw->adapter_state = HNS3_NIC_CONFIGURING;
490 if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
491 hns3_err(hw, "setting link speed/duplex not supported");
496 /* When RSS is not configured, redirect the packet queue 0 */
497 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
498 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
499 hw->rss_dis_flag = false;
500 rss_conf = conf->rx_adv_conf.rss_conf;
501 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
506 ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
510 ret = hns3vf_dev_configure_vlan(dev);
514 /* config hardware GRO */
515 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
516 ret = hns3_config_gro(hw, gro_en);
520 hns3_init_rx_ptype_tble(dev);
522 hw->adapter_state = HNS3_NIC_CONFIGURED;
526 hw->cfg_max_queues = 0;
527 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
528 hw->adapter_state = HNS3_NIC_INITIALIZED;
534 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
538 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
539 sizeof(mtu), true, NULL, 0);
541 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
547 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
549 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
550 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
554 * The hns3 PF/VF devices on the same port share the hardware MTU
555 * configuration. Currently, we send mailbox to inform hns3 PF kernel
556 * ethdev driver to finish hardware MTU configuration in hns3 VF PMD
557 * driver, there is no need to stop the port for hns3 VF device, and the
558 * MTU value issued by hns3 VF PMD driver must be less than or equal to
561 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
562 hns3_err(hw, "Failed to set mtu during resetting");
567 * When Rx of scattered packets is off, the hns3 PMD driver may use the
568 * vector Rx process function or the simple Rx functions. If the input
569 * MTU is increased and the maximum length of received packets becomes
570 * greater than the length of a single Rx packet buffer, the hardware
571 * network engine needs multiple BDs and buffers to store these packets.
572 * This causes problems when the vector Rx process function or the
573 * simple Rx function is still used to receive packets. So, when Rx of
574 * scattered packets is off and the device is started, it is not
575 * permitted to increase the MTU such that the maximum length of Rx
576 * packets exceeds the Rx buffer length.
578 if (dev->data->dev_started && !dev->data->scattered_rx &&
579 frame_size > hw->rx_buf_len) {
580 hns3_err(hw, "failed to set mtu because current is "
581 "not scattered rx mode");
585 rte_spinlock_lock(&hw->lock);
586 ret = hns3vf_config_mtu(hw, mtu);
588 rte_spinlock_unlock(&hw->lock);
591 rte_spinlock_unlock(&hw->lock);
597 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
599 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
603 hns3vf_disable_irq0(struct hns3_hw *hw)
605 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
609 hns3vf_enable_irq0(struct hns3_hw *hw)
611 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
614 static enum hns3vf_evt_cause
615 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
617 struct hns3_hw *hw = &hns->hw;
618 enum hns3vf_evt_cause ret;
619 uint32_t cmdq_stat_reg;
620 uint32_t rst_ing_reg;
623 /* Fetch the events from their corresponding regs */
624 cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
625 if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
626 rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
627 hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
628 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
629 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
630 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
631 hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
632 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
634 hw->reset.stats.global_cnt++;
635 hns3_warn(hw, "Global reset detected, clear reset status");
637 hns3_schedule_delayed_reset(hns);
638 hns3_warn(hw, "Global reset detected, don't clear reset status");
641 ret = HNS3VF_VECTOR0_EVENT_RST;
645 /* Check for vector0 mailbox(=CMDQ RX) event source */
646 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
647 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
648 ret = HNS3VF_VECTOR0_EVENT_MBX;
653 ret = HNS3VF_VECTOR0_EVENT_OTHER;
661 hns3vf_interrupt_handler(void *param)
663 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
664 struct hns3_adapter *hns = dev->data->dev_private;
665 struct hns3_hw *hw = &hns->hw;
666 enum hns3vf_evt_cause event_cause;
669 /* Disable interrupt */
670 hns3vf_disable_irq0(hw);
672 /* Read out interrupt causes */
673 event_cause = hns3vf_check_event_cause(hns, &clearval);
674 /* Clear interrupt causes */
675 hns3vf_clear_event_cause(hw, clearval);
677 switch (event_cause) {
678 case HNS3VF_VECTOR0_EVENT_RST:
679 hns3_schedule_reset(hns);
681 case HNS3VF_VECTOR0_EVENT_MBX:
682 hns3_dev_handle_mbx_msg(hw);
688 /* Enable interrupt */
689 hns3vf_enable_irq0(hw);
693 hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
695 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
696 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
697 hw->rss_key_size = HNS3_RSS_KEY_SIZE;
698 hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
702 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
704 struct hns3_dev_specs_0_cmd *req0;
706 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
708 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
709 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
710 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
711 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
715 hns3vf_check_dev_specifications(struct hns3_hw *hw)
717 if (hw->rss_ind_tbl_size == 0 ||
718 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
719 hns3_warn(hw, "the size of hash lookup table configured (%u)"
720 " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
721 HNS3_RSS_IND_TBL_SIZE_MAX);
729 hns3vf_query_dev_specifications(struct hns3_hw *hw)
731 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
735 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
736 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
738 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
740 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
742 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
746 hns3vf_parse_dev_specifications(hw, desc);
748 return hns3vf_check_dev_specifications(hw);
752 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
754 uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
755 HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
756 uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
757 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
759 if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
760 __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
761 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
765 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
767 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS 500
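/*
 * Probe whether the PF can push link status changes: send a single
 * HNS3_MBX_GET_LINK_STATUS request and wait up to 500ms for
 * hns3vf_update_push_lsc_cap() to move pf_push_lsc_cap out of the
 * UNKNOWN state.
 */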
769 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
770 int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
771 uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
772 uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
773 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
775 __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
778 (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
781 while (remain_ms > 0) {
782 rte_delay_ms(HNS3_POLL_RESPONE_MS);
783 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
784 HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
790 * When the above loop exits, pf_push_lsc_cap can be in one of three
791 * states: unknown (the PF has not acknowledged), not_supported, or
792 * supported. Configure it as 'not_supported' if it is still 'unknown'.
794 __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
795 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
797 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
798 HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
799 hns3_info(hw, "detect PF support push link status change!");
802 * Framework already set RTE_ETH_DEV_INTR_LSC bit because driver
803 * declared RTE_PCI_DRV_INTR_LSC in drv_flags. So here clear
804 * the RTE_ETH_DEV_INTR_LSC capability.
806 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
811 hns3vf_get_capability(struct hns3_hw *hw)
813 struct rte_pci_device *pci_dev;
814 struct rte_eth_dev *eth_dev;
818 eth_dev = &rte_eth_devices[hw->data->port_id];
819 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
821 /* Get PCI revision id */
822 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
823 HNS3_PCI_REVISION_ID);
824 if (ret != HNS3_PCI_REVISION_ID_LEN) {
825 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
829 hw->revision = revision;
831 if (revision < PCI_REVISION_ID_HIP09_A) {
832 hns3vf_set_default_dev_specifications(hw);
833 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
834 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
835 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
836 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
837 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
838 hw->rss_info.ipv6_sctp_offload_supported = false;
839 hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
843 ret = hns3vf_query_dev_specifications(hw);
846 "failed to query dev specifications, ret = %d",
851 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
852 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
853 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
854 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
855 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
856 hw->rss_info.ipv6_sctp_offload_supported = true;
857 hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
863 hns3vf_check_tqp_info(struct hns3_hw *hw)
865 if (hw->tqps_num == 0) {
866 PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
870 if (hw->rss_size_max == 0) {
871 PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
875 hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
881 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
886 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
887 HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
888 true, &resp_msg, sizeof(resp_msg));
892 * Getting current port based VLAN state from PF driver
893 * will not affect the VF driver's basic function. Because
894 * the VF driver relies on the hns3 PF kernel ethdev driver,
895 * and to avoid introducing compatibility issues with older
896 * versions of the PF driver, no failure is returned
897 * when the return value is ETIME. This return value occurs
898 * in the following scenarios:
899 * 1) The firmware didn't return the result in time
900 * 2) The result returned by the firmware timed out
901 * 3) the older version of kernel side PF driver does
902 * not support this mailbox message.
903 * For scenarios 1 and 2, it is most likely that a
904 * hardware error has occurred, or a hardware reset has
905 * occurred. In this case, these errors will be caught
906 * by other functions.
908 PMD_INIT_LOG(WARNING,
909 "failed to get PVID state due to timeout, maybe "
910 "kernel side PF driver doesn't support this "
911 "mailbox message, or firmware didn't respond.");
912 resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
914 PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
919 hw->port_base_vlan_cfg.state = resp_msg ?
920 HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
925 hns3vf_get_queue_info(struct hns3_hw *hw)
927 #define HNS3VF_TQPS_RSS_INFO_LEN 6
928 uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
931 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
932 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
934 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
938 memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
939 memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
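/*
 * The 6-byte GET_QINFO response parsed above packs two 16-bit fields:
 * bytes 0-1 hold tqps_num and bytes 2-3 hold rss_size_max; the remaining
 * two bytes are not consumed here.
 */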
941 return hns3vf_check_tqp_info(hw);
945 hns3vf_get_queue_depth(struct hns3_hw *hw)
947 #define HNS3VF_TQPS_DEPTH_INFO_LEN 4
948 uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
951 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
952 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
954 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
959 memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
960 memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
966 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
968 if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
969 hns3_set_bit(hw->capability,
970 HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
974 hns3vf_get_num_tc(struct hns3_hw *hw)
979 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
980 if (hw->hw_tc_map & BIT(i))
987 hns3vf_get_basic_info(struct hns3_hw *hw)
989 uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
990 struct hns3_basic_info *basic_info;
993 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
994 true, resp_msg, sizeof(resp_msg));
996 hns3_err(hw, "failed to get basic info from PF, ret = %d.",
1001 basic_info = (struct hns3_basic_info *)resp_msg;
1002 hw->hw_tc_map = basic_info->hw_tc_map;
1003 hw->num_tc = hns3vf_get_num_tc(hw);
1004 hw->pf_vf_if_version = basic_info->pf_vf_if_version;
1005 hns3vf_update_caps(hw, basic_info->caps);
1011 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
1013 uint8_t host_mac[RTE_ETHER_ADDR_LEN];
1016 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
1017 true, host_mac, RTE_ETHER_ADDR_LEN);
1019 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
1023 memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
1029 hns3vf_get_configuration(struct hns3_hw *hw)
1033 hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1034 hw->rss_dis_flag = false;
1036 /* Get device capability */
1037 ret = hns3vf_get_capability(hw);
1039 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1043 hns3vf_get_push_lsc_cap(hw);
1045 /* Get basic info from PF */
1046 ret = hns3vf_get_basic_info(hw);
1050 /* Get queue configuration from PF */
1051 ret = hns3vf_get_queue_info(hw);
1055 /* Get queue depth info from PF */
1056 ret = hns3vf_get_queue_depth(hw);
1060 /* Get user defined VF MAC addr from PF */
1061 ret = hns3vf_get_host_mac_addr(hw);
1065 return hns3vf_get_port_base_vlan_filter_state(hw);
1069 hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
1072 struct hns3_hw *hw = &hns->hw;
1074 return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1078 hns3vf_request_link_info(struct hns3_hw *hw)
1080 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1084 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1087 send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1088 vf->req_link_info_cnt > 0;
1092 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1095 hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1099 if (vf->req_link_info_cnt > 0)
1100 vf->req_link_info_cnt--;
1104 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1105 uint32_t link_speed, uint8_t link_duplex)
1107 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1108 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1109 struct hns3_mac *mac = &hw->mac;
1113 * The PF kernel driver may push the link status while the VF driver is
1114 * resetting. The driver stops the polling job in this case and starts
1115 * it again after the reset is done.
1116 * Once the polling job has started, the driver gets the initial link
1117 * status by sending a request to the PF kernel driver, then updates it
1118 * by processing the PF kernel driver's link status mailbox messages.
1120 if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1123 if (hw->adapter_state != HNS3_NIC_STARTED)
1126 mac->link_status = link_status;
1127 mac->link_speed = link_speed;
1128 mac->link_duplex = link_duplex;
1129 ret = hns3vf_dev_link_update(dev, 0);
1130 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1131 hns3_start_report_lse(dev);
1135 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1137 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1138 struct hns3_hw *hw = &hns->hw;
1139 uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1140 uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1141 uint8_t is_kill = on ? 0 : 1;
1143 msg_data[0] = is_kill;
1144 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1145 memcpy(&msg_data[3], &proto, sizeof(proto));
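/*
 * 5-byte HNS3_MBX_VLAN_FILTER payload assembled above: byte 0 is the
 * is_kill flag (1 = remove the filter), bytes 1-2 carry the VLAN id and
 * bytes 3-4 carry the VLAN ethertype (0x8100) in network byte order.
 */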
1147 return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1148 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1153 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1155 struct hns3_adapter *hns = dev->data->dev_private;
1156 struct hns3_hw *hw = &hns->hw;
1159 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1161 "vf set vlan id failed during resetting, vlan_id =%u",
1165 rte_spinlock_lock(&hw->lock);
1166 ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1167 rte_spinlock_unlock(&hw->lock);
1169 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1176 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1181 if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1184 msg_data = enable ? 1 : 0;
1185 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1186 HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
1187 sizeof(msg_data), true, NULL, 0);
1189 hns3_err(hw, "%s vlan filter failed, ret = %d.",
1190 enable ? "enable" : "disable", ret);
1196 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1201 msg_data = enable ? 1 : 0;
1202 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1203 &msg_data, sizeof(msg_data), false, NULL, 0);
1205 hns3_err(hw, "vf %s strip failed, ret = %d.",
1206 enable ? "enable" : "disable", ret);
1212 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1214 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1216 unsigned int tmp_mask;
1219 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1220 hns3_err(hw, "vf set vlan offload failed during resetting, "
1221 "mask = 0x%x", mask);
1225 tmp_mask = (unsigned int)mask;
1227 if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1228 rte_spinlock_lock(&hw->lock);
1229 /* Enable or disable VLAN filter */
1230 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1231 ret = hns3vf_en_vlan_filter(hw, true);
1233 ret = hns3vf_en_vlan_filter(hw, false);
1234 rte_spinlock_unlock(&hw->lock);
1239 /* Vlan stripping setting */
1240 if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1241 rte_spinlock_lock(&hw->lock);
1242 /* Enable or disable VLAN stripping */
1243 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1244 ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1246 ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1247 rte_spinlock_unlock(&hw->lock);
1254 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1256 struct rte_vlan_filter_conf *vfc;
1257 struct hns3_hw *hw = &hns->hw;
1264 vfc = &hw->data->vlan_filter_conf;
1265 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1266 if (vfc->ids[i] == 0)
1271 * 64 means the number of bits in ids; one bit corresponds to one vlan id
1275 /* count trailing zeroes: vbit has a 1 in every position below the lowest set bit */
1276 vbit = ~ids & (ids - 1);
1277 /* clear the least significant set bit of ids */
1278 ids ^= (ids ^ (ids - 1)) ^ vbit;
1283 ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1286 "VF handle vlan table failed, ret =%d, on = %d",
1297 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1299 return hns3vf_handle_all_vlan_table(hns, 0);
1303 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1305 struct hns3_hw *hw = &hns->hw;
1306 struct rte_eth_conf *dev_conf;
1310 dev_conf = &hw->data->dev_conf;
1311 en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1313 ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1315 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1321 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1323 struct hns3_adapter *hns = dev->data->dev_private;
1324 struct rte_eth_dev_data *data = dev->data;
1325 struct hns3_hw *hw = &hns->hw;
1328 if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1329 data->dev_conf.txmode.hw_vlan_reject_untagged ||
1330 data->dev_conf.txmode.hw_vlan_insert_pvid) {
1331 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1332 "or hw_vlan_insert_pvid is not supported!");
1335 /* Apply vlan offload setting */
1336 ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1337 RTE_ETH_VLAN_FILTER_MASK);
1339 hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1345 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1349 msg_data = alive ? 1 : 0;
1350 return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1351 sizeof(msg_data), false, NULL, 0);
1355 hns3vf_keep_alive_handler(void *param)
1357 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1358 struct hns3_adapter *hns = eth_dev->data->dev_private;
1359 struct hns3_hw *hw = &hns->hw;
1362 ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1365 hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
1368 rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1373 hns3vf_service_handler(void *param)
1375 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1376 struct hns3_adapter *hns = eth_dev->data->dev_private;
1377 struct hns3_hw *hw = &hns->hw;
1380 * The query link status and reset processing are executed in the
1381 * interrupt thread. When the IMP reset occurs, the IMP will not respond,
1382 * and the query operation will time out after 30ms. In the case of
1383 * multiple PF/VFs, each query timeout causes the IMP reset interrupt
1384 * to miss its 100ms response window.
1385 * Before querying the link status, check whether there is a reset
1386 * pending, and if so, abandon the query.
1388 if (!hns3vf_is_reset_pending(hns))
1389 hns3vf_request_link_info(hw);
1391 hns3_warn(hw, "Cancel the query when reset is pending");
1393 rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1398 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1400 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT 3
1402 struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1404 if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1405 vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1407 __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1409 hns3vf_service_handler(dev);
1413 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1415 struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1417 rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1419 __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1423 hns3_query_vf_resource(struct hns3_hw *hw)
1425 struct hns3_vf_res_cmd *req;
1426 struct hns3_cmd_desc desc;
1430 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1431 ret = hns3_cmd_send(hw, &desc, 1);
1433 hns3_err(hw, "query vf resource failed, ret = %d", ret);
1437 req = (struct hns3_vf_res_cmd *)desc.data;
1438 num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1439 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1440 if (num_msi < HNS3_MIN_VECTOR_NUM) {
1441 hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1442 num_msi, HNS3_MIN_VECTOR_NUM);
1446 hw->num_msi = num_msi;
1452 hns3vf_init_hardware(struct hns3_adapter *hns)
1454 struct hns3_hw *hw = &hns->hw;
1455 uint16_t mtu = hw->data->mtu;
1458 ret = hns3vf_set_promisc_mode(hw, true, false, false);
1462 ret = hns3vf_config_mtu(hw, mtu);
1464 goto err_init_hardware;
1466 ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1468 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1469 goto err_init_hardware;
1472 ret = hns3_config_gro(hw, false);
1474 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1475 goto err_init_hardware;
1479 * During initialization, all hardware mappings between queues and
1480 * interrupt vectors need to be cleared, so that errors caused by
1481 * residual configurations, such as unexpected interrupts, can be
1482 * avoided.
1484 ret = hns3_init_ring_with_vector(hw);
1486 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1487 goto err_init_hardware;
1493 (void)hns3vf_set_promisc_mode(hw, false, false, false);
1498 hns3vf_clear_vport_list(struct hns3_hw *hw)
1500 return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1501 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1506 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1508 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1509 struct hns3_adapter *hns = eth_dev->data->dev_private;
1510 struct hns3_hw *hw = &hns->hw;
1513 PMD_INIT_FUNC_TRACE();
1515 /* Get hardware io base address from pcie BAR2 IO space */
1516 hw->io_base = pci_dev->mem_resource[2].addr;
1518 /* Firmware command queue initialize */
1519 ret = hns3_cmd_init_queue(hw);
1521 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1522 goto err_cmd_init_queue;
1525 /* Firmware command initialize */
1526 ret = hns3_cmd_init(hw);
1528 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1532 hns3_tx_push_init(eth_dev);
1534 /* Get VF resource */
1535 ret = hns3_query_vf_resource(hw);
1539 rte_spinlock_init(&hw->mbx_resp.lock);
1541 hns3vf_clear_event_cause(hw, 0);
1543 ret = rte_intr_callback_register(pci_dev->intr_handle,
1544 hns3vf_interrupt_handler, eth_dev);
1546 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1547 goto err_intr_callback_register;
1550 /* Enable interrupt */
1551 rte_intr_enable(pci_dev->intr_handle);
1552 hns3vf_enable_irq0(hw);
1554 /* Get configuration from PF */
1555 ret = hns3vf_get_configuration(hw);
1557 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1558 goto err_get_config;
1561 ret = hns3_tqp_stats_init(hw);
1563 goto err_get_config;
1565 /* Clear the hardware statistics of the imissed registers. */
1566 ret = hns3_update_imissed_stats(hw, true);
1568 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1569 goto err_set_tc_queue;
1572 ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
1574 PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1575 goto err_set_tc_queue;
1578 ret = hns3vf_clear_vport_list(hw);
1580 PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1581 goto err_set_tc_queue;
1584 ret = hns3vf_init_hardware(hns);
1586 goto err_set_tc_queue;
1588 hns3_rss_set_default_args(hw);
1590 ret = hns3vf_set_alive(hw, true);
1592 PMD_INIT_LOG(ERR, "Failed to send alive to PF: %d", ret);
1593 goto err_set_tc_queue;
1599 hns3_tqp_stats_uninit(hw);
1602 hns3vf_disable_irq0(hw);
1603 rte_intr_disable(pci_dev->intr_handle);
1604 hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1606 err_intr_callback_register:
1608 hns3_cmd_uninit(hw);
1609 hns3_cmd_destroy_queue(hw);
1617 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1619 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1620 struct hns3_adapter *hns = eth_dev->data->dev_private;
1621 struct hns3_hw *hw = &hns->hw;
1623 PMD_INIT_FUNC_TRACE();
1625 hns3_rss_uninit(hns);
1626 (void)hns3_config_gro(hw, false);
1627 (void)hns3vf_set_alive(hw, false);
1628 (void)hns3vf_set_promisc_mode(hw, false, false, false);
1629 hns3_flow_uninit(eth_dev);
1630 hns3_tqp_stats_uninit(hw);
1631 hns3vf_disable_irq0(hw);
1632 rte_intr_disable(pci_dev->intr_handle);
1633 hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1635 hns3_cmd_uninit(hw);
1636 hns3_cmd_destroy_queue(hw);
1641 hns3vf_do_stop(struct hns3_adapter *hns)
1643 struct hns3_hw *hw = &hns->hw;
1646 hw->mac.link_status = RTE_ETH_LINK_DOWN;
1649 * The "hns3vf_do_stop" function will also be called by .stop_service to
1650 * prepare reset. At the time of global or IMP reset, the command cannot
1651 * be sent to stop the Tx/Rx queues. The mbufs in the Tx/Rx queues may
1652 * be accessed during the reset process, so they cannot be released
1653 * during reset and must be released after the reset is completed.
1656 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
1657 hns3_dev_release_mbufs(hns);
1659 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1660 hns3_configure_all_mac_addr(hns, true);
1661 ret = hns3_reset_all_tqps(hns);
1663 hns3_err(hw, "failed to reset all queues ret = %d",
1672 hns3vf_dev_stop(struct rte_eth_dev *dev)
1674 struct hns3_adapter *hns = dev->data->dev_private;
1675 struct hns3_hw *hw = &hns->hw;
1677 PMD_INIT_FUNC_TRACE();
1678 dev->data->dev_started = 0;
1680 hw->adapter_state = HNS3_NIC_STOPPING;
1681 hns3_set_rxtx_function(dev);
1683 /* Disable datapath on secondary process. */
1684 hns3_mp_req_stop_rxtx(dev);
1685 /* Prevent crashes when queues are still in use. */
1686 rte_delay_ms(hw->cfg_max_queues);
1688 rte_spinlock_lock(&hw->lock);
1689 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1691 hns3vf_do_stop(hns);
1692 hns3_unmap_rx_interrupt(dev);
1693 hw->adapter_state = HNS3_NIC_CONFIGURED;
1695 hns3_rx_scattered_reset(dev);
1696 hns3vf_stop_poll_job(dev);
1697 hns3_stop_report_lse(dev);
1698 rte_spinlock_unlock(&hw->lock);
1704 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1706 struct hns3_adapter *hns = eth_dev->data->dev_private;
1707 struct hns3_hw *hw = &hns->hw;
1710 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1711 hns3_mp_uninit(eth_dev);
1715 if (hw->adapter_state == HNS3_NIC_STARTED)
1716 ret = hns3vf_dev_stop(eth_dev);
1718 hw->adapter_state = HNS3_NIC_CLOSING;
1719 hns3_reset_abort(hns);
1720 hw->adapter_state = HNS3_NIC_CLOSED;
1721 rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1722 hns3_configure_all_mc_mac_addr(hns, true);
1723 hns3vf_remove_all_vlan_table(hns);
1724 hns3vf_uninit_vf(eth_dev);
1725 hns3_free_all_queues(eth_dev);
1726 rte_free(hw->reset.wait_data);
1727 hns3_mp_uninit(eth_dev);
1728 hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1734 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1735 __rte_unused int wait_to_complete)
1737 struct hns3_adapter *hns = eth_dev->data->dev_private;
1738 struct hns3_hw *hw = &hns->hw;
1739 struct hns3_mac *mac = &hw->mac;
1740 struct rte_eth_link new_link;
1742 memset(&new_link, 0, sizeof(new_link));
1743 switch (mac->link_speed) {
1744 case RTE_ETH_SPEED_NUM_10M:
1745 case RTE_ETH_SPEED_NUM_100M:
1746 case RTE_ETH_SPEED_NUM_1G:
1747 case RTE_ETH_SPEED_NUM_10G:
1748 case RTE_ETH_SPEED_NUM_25G:
1749 case RTE_ETH_SPEED_NUM_40G:
1750 case RTE_ETH_SPEED_NUM_50G:
1751 case RTE_ETH_SPEED_NUM_100G:
1752 case RTE_ETH_SPEED_NUM_200G:
1753 if (mac->link_status)
1754 new_link.link_speed = mac->link_speed;
1757 if (mac->link_status)
1758 new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1762 if (!mac->link_status)
1763 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1765 new_link.link_duplex = mac->link_duplex;
1766 new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1767 new_link.link_autoneg =
1768 !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1770 return rte_eth_linkstatus_set(eth_dev, &new_link);
1774 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1776 struct hns3_hw *hw = &hns->hw;
1777 uint16_t nb_rx_q = hw->data->nb_rx_queues;
1778 uint16_t nb_tx_q = hw->data->nb_tx_queues;
1781 ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
1785 hns3_enable_rxd_adv_layout(hw);
1787 ret = hns3_init_queues(hns, reset_queue);
1789 hns3_err(hw, "failed to init queues, ret = %d.", ret);
1795 hns3vf_restore_filter(struct rte_eth_dev *dev)
1797 hns3_restore_rss_filter(dev);
1801 hns3vf_dev_start(struct rte_eth_dev *dev)
1803 struct hns3_adapter *hns = dev->data->dev_private;
1804 struct hns3_hw *hw = &hns->hw;
1807 PMD_INIT_FUNC_TRACE();
1808 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1811 rte_spinlock_lock(&hw->lock);
1812 hw->adapter_state = HNS3_NIC_STARTING;
1813 ret = hns3vf_do_start(hns, true);
1815 hw->adapter_state = HNS3_NIC_CONFIGURED;
1816 rte_spinlock_unlock(&hw->lock);
1819 ret = hns3_map_rx_interrupt(dev);
1821 goto map_rx_inter_err;
1824 * There are three registers used to control the status of a TQP
1825 * (which contains a pair of Tx queue and Rx queue) in the new version
1826 * of the network engine. One controls the enabling of the Tx queue,
1827 * another controls the enabling of the Rx queue, and the last is the
1828 * master switch that controls the enabling of the TQP. Both the Tx
1829 * register and the TQP register must be enabled to enable a Tx queue.
1830 * The same applies to the Rx queue. For the older network engine, this
1831 * function only refreshes the enabled flag and is used to update the
1832 * status of the queue in the DPDK framework.
1834 ret = hns3_start_all_txqs(dev);
1836 goto map_rx_inter_err;
1838 ret = hns3_start_all_rxqs(dev);
1840 goto start_all_rxqs_fail;
1842 hw->adapter_state = HNS3_NIC_STARTED;
1843 rte_spinlock_unlock(&hw->lock);
1845 hns3_rx_scattered_calc(dev);
1846 hns3_set_rxtx_function(dev);
1847 hns3_mp_req_start_rxtx(dev);
1849 hns3vf_restore_filter(dev);
1851 /* Enable interrupt of all rx queues before enabling queues */
1852 hns3_dev_all_rx_queue_intr_enable(hw, true);
1853 hns3_start_tqps(hw);
1855 if (dev->data->dev_conf.intr_conf.lsc != 0)
1856 hns3vf_dev_link_update(dev, 0);
1857 hns3vf_start_poll_job(dev);
1861 start_all_rxqs_fail:
1862 hns3_stop_all_txqs(dev);
1864 (void)hns3vf_do_stop(hns);
1865 hw->adapter_state = HNS3_NIC_CONFIGURED;
1866 rte_spinlock_unlock(&hw->lock);
1872 is_vf_reset_done(struct hns3_hw *hw)
1874 #define HNS3_FUN_RST_ING_BITS \
1875 (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1876 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1877 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1878 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
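/*
 * A reset is considered complete once the relevant "reset in progress"
 * bits read back as zero: HNS3_VF_RST_ING for a VF reset, otherwise the
 * global/core/IMP/function bits of HNS3_FUN_RST_ING checked below.
 */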
1882 if (hw->reset.level == HNS3_VF_RESET) {
1883 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1884 if (val & HNS3_VF_RST_ING_BIT)
1887 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1888 if (val & HNS3_FUN_RST_ING_BITS)
1895 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1897 struct hns3_hw *hw = &hns->hw;
1898 enum hns3_reset_level reset;
1901 * According to the protocol of PCIe, FLR to a PF device resets the PF
1902 * state as well as the SR-IOV extended capability including VF Enable
1903 * which means that VFs no longer exist.
1905 * HNS3_VF_FULL_RESET means PF device is in FLR reset. when PF device
1906 * is in FLR stage, the register state of VF device is not reliable,
1907 * so register states detection can not be carried out. In this case,
1908 * we just ignore the register states and return false to indicate that
1909 * there are no other reset states that need to be processed by driver.
1911 if (hw->reset.level == HNS3_VF_FULL_RESET)
1914 /* Check the registers to confirm whether there is reset pending */
1915 hns3vf_check_event_cause(hns, NULL);
1916 reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1917 if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
1918 hw->reset.level < reset) {
1919 hns3_warn(hw, "High level reset %d is pending", reset);
1926 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1928 struct hns3_hw *hw = &hns->hw;
1929 struct hns3_wait_data *wait_data = hw->reset.wait_data;
1932 if (wait_data->result == HNS3_WAIT_SUCCESS) {
1934 * After vf reset is ready, the PF may not have completed
1935 * the reset processing. The vf sending mbox to PF may fail
1936 * during the pf reset, so it is better to add extra delay.
1938 if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1939 hw->reset.level == HNS3_FLR_RESET)
1941 /* Reset retry process, no need to add extra delay. */
1942 if (hw->reset.attempts)
1944 if (wait_data->check_completion == NULL)
1947 wait_data->check_completion = NULL;
1948 wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
1949 wait_data->count = 1;
1950 wait_data->result = HNS3_WAIT_REQUEST;
1951 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1953 hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
1955 } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1956 hns3_clock_gettime(&tv);
1957 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1958 tv.tv_sec, tv.tv_usec);
1960 } else if (wait_data->result == HNS3_WAIT_REQUEST)
1963 wait_data->hns = hns;
1964 wait_data->check_completion = is_vf_reset_done;
1965 wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1966 HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1967 wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1968 wait_data->count = HNS3VF_RESET_WAIT_CNT;
1969 wait_data->result = HNS3_WAIT_REQUEST;
1970 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1975 hns3vf_prepare_reset(struct hns3_adapter *hns)
1977 struct hns3_hw *hw = &hns->hw;
1980 if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1981 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1986 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1992 hns3vf_stop_service(struct hns3_adapter *hns)
1994 struct hns3_hw *hw = &hns->hw;
1995 struct rte_eth_dev *eth_dev;
1997 eth_dev = &rte_eth_devices[hw->data->port_id];
1998 if (hw->adapter_state == HNS3_NIC_STARTED) {
2000 * Make sure to update the link status before hns3vf_stop_poll_job,
2001 * because updating the link status depends on the polling job running.
2003 hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
2004 hw->mac.link_duplex);
2005 hns3vf_stop_poll_job(eth_dev);
2007 hw->mac.link_status = RTE_ETH_LINK_DOWN;
2009 hns3_set_rxtx_function(eth_dev);
2011 /* Disable datapath on secondary process. */
2012 hns3_mp_req_stop_rxtx(eth_dev);
2013 rte_delay_ms(hw->cfg_max_queues);
2015 rte_spinlock_lock(&hw->lock);
2016 if (hw->adapter_state == HNS3_NIC_STARTED ||
2017 hw->adapter_state == HNS3_NIC_STOPPING) {
2018 hns3_enable_all_queues(hw, false);
2019 hns3vf_do_stop(hns);
2020 hw->reset.mbuf_deferred_free = true;
2022 hw->reset.mbuf_deferred_free = false;
2025 * It is cumbersome for hardware to pick-and-choose entries for deletion
2026 * from table space. Hence, for a function reset, software intervention
2027 * is required to delete the entries.
2029 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
2030 hns3_configure_all_mc_mac_addr(hns, true);
2031 rte_spinlock_unlock(&hw->lock);
2037 hns3vf_start_service(struct hns3_adapter *hns)
2039 struct hns3_hw *hw = &hns->hw;
2040 struct rte_eth_dev *eth_dev;
2042 eth_dev = &rte_eth_devices[hw->data->port_id];
2043 hns3_set_rxtx_function(eth_dev);
2044 hns3_mp_req_start_rxtx(eth_dev);
2045 if (hw->adapter_state == HNS3_NIC_STARTED) {
2046 hns3vf_start_poll_job(eth_dev);
2048 /* Enable interrupt of all rx queues before enabling queues */
2049 hns3_dev_all_rx_queue_intr_enable(hw, true);
2051 * The enable state of each rxq and txq will be recovered after
2052 * reset, so we need to restore them before enabling all tqps;
2054 hns3_restore_tqp_enable_state(hw);
2056 * When the initialization is finished, enable the queues to receive
2057 * and transmit packets.
2059 hns3_enable_all_queues(hw, true);
2066 hns3vf_check_default_mac_change(struct hns3_hw *hw)
2068 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2069 struct rte_ether_addr *hw_mac;
2073 * The hns3 PF kernel ethdev driver supports setting the VF MAC address
2074 * on the host with the "ip link set ..." command. If the hns3 PF kernel
2075 * ethdev driver sets the MAC address for the VF device after the
2076 * initialization of the related VF device, the PF driver notifies the
2077 * VF driver to reset the VF device so the new MAC address takes effect
2078 * immediately. The hns3 VF PMD driver should check whether the MAC
2079 * address has been changed by the PF kernel ethdev driver; if it has,
2080 * the VF driver should configure the hardware with the new MAC address
2081 * during the hardware-configuration recovery stage of the reset process.
2083 ret = hns3vf_get_host_mac_addr(hw);
2087 hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2088 ret = rte_is_zero_ether_addr(hw_mac);
2090 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2092 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2094 rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2095 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2096 &hw->data->mac_addrs[0]);
2097 hns3_warn(hw, "Default MAC address has been changed to:"
2098 " %s by the host PF kernel ethdev driver",
2107 hns3vf_restore_conf(struct hns3_adapter *hns)
2109 struct hns3_hw *hw = &hns->hw;
2112 ret = hns3vf_check_default_mac_change(hw);
2116 ret = hns3_configure_all_mac_addr(hns, false);
2120 ret = hns3_configure_all_mc_mac_addr(hns, false);
2124 ret = hns3vf_restore_promisc(hns);
2126 goto err_vlan_table;
2128 ret = hns3vf_restore_vlan_conf(hns);
2130 goto err_vlan_table;
2132 ret = hns3vf_get_port_base_vlan_filter_state(hw);
2134 goto err_vlan_table;
2136 ret = hns3_restore_rx_interrupt(hw);
2138 goto err_vlan_table;
2140 ret = hns3_restore_gro_conf(hw);
2142 goto err_vlan_table;
2144 if (hw->adapter_state == HNS3_NIC_STARTED) {
2145 ret = hns3vf_do_start(hns, false);
2147 goto err_vlan_table;
2148 hns3_info(hw, "hns3vf dev restart successful!");
2149 } else if (hw->adapter_state == HNS3_NIC_STOPPING)
2150 hw->adapter_state = HNS3_NIC_CONFIGURED;
2152 ret = hns3vf_set_alive(hw, true);
2154 hns3_err(hw, "failed to send alive to PF: %d", ret);
2155 goto err_vlan_table;
2161 hns3_configure_all_mc_mac_addr(hns, true);
2163 hns3_configure_all_mac_addr(hns, true);
2167 static enum hns3_reset_level
2168 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2170 enum hns3_reset_level reset_level;
2172 /* return the highest priority reset level amongst all */
2173 if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2174 reset_level = HNS3_VF_RESET;
2175 else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2176 reset_level = HNS3_VF_FULL_RESET;
2177 else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2178 reset_level = HNS3_VF_PF_FUNC_RESET;
2179 else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2180 reset_level = HNS3_VF_FUNC_RESET;
2181 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2182 reset_level = HNS3_FLR_RESET;
2184 reset_level = HNS3_NONE_RESET;
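/*
 * If a higher-level reset is already being handled, report
 * HNS3_NONE_RESET so that the pending lower-priority reset does not
 * interfere with the reset currently in progress.
 */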
2186 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2187 return HNS3_NONE_RESET;
2193 hns3vf_reset_service(void *param)
2195 struct hns3_adapter *hns = (struct hns3_adapter *)param;
2196 struct hns3_hw *hw = &hns->hw;
2197 enum hns3_reset_level reset_level;
2198 struct timeval tv_delta;
2199 struct timeval tv_start;
2204 * The interrupt is not triggered within the delay time.
2205 * The interrupt may have been lost. It is necessary to handle
2206 * the interrupt to recover from the error.
2208 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2209 SCHEDULE_DEFERRED) {
2210 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2212 hns3_err(hw, "Handling interrupts in delayed tasks");
2213 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2214 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2215 if (reset_level == HNS3_NONE_RESET) {
2216 hns3_err(hw, "No reset level is set, try global reset");
2217 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2220 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2223 * Hardware reset has been notified, we now have to poll & check if
2224 * hardware has actually completed the reset sequence.
2226 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2227 if (reset_level != HNS3_NONE_RESET) {
2228 hns3_clock_gettime(&tv_start);
2229 hns3_reset_process(hns, reset_level);
2230 hns3_clock_gettime(&tv);
2231 timersub(&tv, &tv_start, &tv_delta);
2232 msec = hns3_clock_calctime_ms(&tv_delta);
2233 if (msec > HNS3_RESET_PROCESS_MS)
2234 hns3_err(hw, "%d handle long time delta %" PRIu64
2235 " ms time=%ld.%.6ld",
2236 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2241 hns3vf_reinit_dev(struct hns3_adapter *hns)
2243 struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2244 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2245 struct hns3_hw *hw = &hns->hw;
2248 if (hw->reset.level == HNS3_VF_FULL_RESET) {
2249 rte_intr_disable(pci_dev->intr_handle);
2250 ret = hns3vf_set_bus_master(pci_dev, true);
2252 hns3_err(hw, "failed to set pci bus, ret = %d", ret);
2257 /* Firmware command initialize */
2258 ret = hns3_cmd_init(hw);
2260 hns3_err(hw, "Failed to init cmd: %d", ret);
2264 if (hw->reset.level == HNS3_VF_FULL_RESET) {
2266 * UIO enables MSI-X by writing the PCIe configuration space directly,
2267 * while vfio_pci enables MSI-X in rte_intr_enable.
2269 if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2270 pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2271 if (hns3vf_enable_msix(pci_dev, true))
2272 hns3_err(hw, "Failed to enable msix");
2275 rte_intr_enable(pci_dev->intr_handle);
2278 ret = hns3_reset_all_tqps(hns);
2280 hns3_err(hw, "Failed to reset all queues: %d", ret);
2284 ret = hns3vf_init_hardware(hns);
2286 hns3_err(hw, "Failed to init hardware: %d", ret);
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_configure = hns3vf_dev_configure,
	.dev_start = hns3vf_dev_start,
	.dev_stop = hns3vf_dev_stop,
	.dev_close = hns3vf_dev_close,
	.mtu_set = hns3vf_dev_mtu_set,
	.promiscuous_enable = hns3vf_dev_promiscuous_enable,
	.promiscuous_disable = hns3vf_dev_promiscuous_disable,
	.allmulticast_enable = hns3vf_dev_allmulticast_enable,
	.allmulticast_disable = hns3vf_dev_allmulticast_disable,
	.stats_get = hns3_stats_get,
	.stats_reset = hns3_stats_reset,
	.xstats_get = hns3_dev_xstats_get,
	.xstats_get_names = hns3_dev_xstats_get_names,
	.xstats_reset = hns3_dev_xstats_reset,
	.xstats_get_by_id = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get = hns3_dev_infos_get,
	.fw_version_get = hns3_fw_version_get,
	.rx_queue_setup = hns3_rx_queue_setup,
	.tx_queue_setup = hns3_tx_queue_setup,
	.rx_queue_release = hns3_dev_rx_queue_release,
	.tx_queue_release = hns3_dev_tx_queue_release,
	.rx_queue_start = hns3_dev_rx_queue_start,
	.rx_queue_stop = hns3_dev_rx_queue_stop,
	.tx_queue_start = hns3_dev_tx_queue_start,
	.tx_queue_stop = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get = hns3_rxq_info_get,
	.txq_info_get = hns3_txq_info_get,
	.rx_burst_mode_get = hns3_rx_burst_mode_get,
	.tx_burst_mode_get = hns3_tx_burst_mode_get,
	.mac_addr_add = hns3_add_mac_addr,
	.mac_addr_remove = hns3_remove_mac_addr,
	.mac_addr_set = hns3vf_set_default_mac_addr,
	.set_mc_addr_list = hns3_set_mc_mac_addr_list,
	.link_update = hns3vf_dev_link_update,
	.rss_hash_update = hns3_dev_rss_hash_update,
	.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
	.reta_update = hns3_dev_rss_reta_update,
	.reta_query = hns3_dev_rss_reta_query,
	.flow_ops_get = hns3_dev_flow_ops_get,
	.vlan_filter_set = hns3vf_vlan_filter_set,
	.vlan_offload_set = hns3vf_vlan_offload_set,
	.get_reg = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	.tx_done_cleanup = hns3_tx_done_cleanup,
};
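
/*
 * The reset framework (hns3_reset_process() in hns3_intr.c) drives the
 * callbacks below: broadly, it stops the service, prepares the reset, waits
 * for the hardware to become ready, re-initializes the device, restores the
 * saved configuration, and finally restarts the service.
 */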
static const struct hns3_reset_ops hns3vf_reset_ops = {
	.reset_service = hns3vf_reset_service,
	.stop_service = hns3vf_stop_service,
	.prepare_reset = hns3vf_prepare_reset,
	.wait_hardware_ready = hns3vf_wait_hardware_ready,
	.reinit_dev = hns3vf_reinit_dev,
	.restore_conf = hns3vf_restore_conf,
	.start_service = hns3vf_start_service,
};

static void
hns3vf_init_hw_ops(struct hns3_hw *hw)
{
	hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
	hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
	hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
	hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
	hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
}

static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	ret = hns3_mp_init(eth_dev);
	if (ret)
		goto err_mp_init;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3vf_reset_ops;

	hns3vf_init_hw_ops(hw);
	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	/*
	 * The hns3 PF kernel ethdev driver supports setting the VF MAC address
	 * on the host with the "ip link set ..." command. To avoid problems
	 * such as the hns3 VF PMD failing to receive and send packets after
	 * the user configures a MAC address that way, the VF PMD keeps the
	 * same MAC address strategy as the hns3 kernel ethdev driver during
	 * initialization: if a MAC address has been configured for the VF
	 * device with the ip command, start with it; otherwise start with a
	 * random MAC address.
	 */
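	/*
	 * Host-side example of the case above (interface name, VF index and
	 * MAC address are placeholders):
	 *   ip link set <pf_netdev> vf <vf_id> mac 02:11:22:33:44:55
	 */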
	if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr))
		rte_eth_random_addr(hw->mac.mac_addr);
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait for the ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit(eth_dev);

err_mp_init:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;

	return ret;
}

static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		__atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
		hns3_mp_uninit(eth_dev);
		return 0;
	}

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3vf_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}

static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3vf_dev_init);
}

static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, }, /* sentinel */
};
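
/*
 * RTE_PCI_DRV_NEED_MAPPING requests that the PCI bus driver map the device
 * BARs before probing, and RTE_PCI_DRV_INTR_LSC advertises support for the
 * link-status-change interrupt.
 */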
static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
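
/*
 * Example EAL invocation (the PCI address is a placeholder; the devarg keys
 * are the strings behind the HNS3_DEVARG_* macros above):
 *   dpdk-testpmd -a 0000:7d:01.0,rx_func_hint=common,tx_func_hint=common -- -i
 */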