1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
5 #include <linux/pci_regs.h>
7 #include <ethdev_pci.h>
12 #include "hns3_ethdev.h"
13 #include "hns3_logs.h"
14 #include "hns3_rxtx.h"
15 #include "hns3_regs.h"
16 #include "hns3_intr.h"
20 #define HNS3VF_KEEP_ALIVE_INTERVAL 2000000 /* us */
21 #define HNS3VF_SERVICE_INTERVAL 1000000 /* us */
23 #define HNS3VF_RESET_WAIT_MS 20
24 #define HNS3VF_RESET_WAIT_CNT 2000
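/*
 * Illustrative timing summary derived from the values above and below:
 * the keep-alive mailbox fires every 2s, the service handler every 1s,
 * and the reset wait polls up to 2000 * 20ms = 40s in total.
 */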
26 /* Reset related Registers */
27 #define HNS3_GLOBAL_RESET_BIT 0
28 #define HNS3_CORE_RESET_BIT 1
29 #define HNS3_IMP_RESET_BIT 2
30 #define HNS3_FUN_RST_ING_B 0
32 enum hns3vf_evt_cause {
33 HNS3VF_VECTOR0_EVENT_RST,
34 HNS3VF_VECTOR0_EVENT_MBX,
35 HNS3VF_VECTOR0_EVENT_OTHER,
38 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
40 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
41 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
43 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
44 struct rte_ether_addr *mac_addr);
45 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
46 struct rte_ether_addr *mac_addr);
47 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
48 __rte_unused int wait_to_complete);
50 /* set PCI bus mastering */
52 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
57 ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
59 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
65 /* set the master bit */
66 reg |= PCI_COMMAND_MASTER;
68 reg &= ~(PCI_COMMAND_MASTER);
70 return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
74 * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
75 * @cap: the capability
77 * Return the address of the given capability within the PCI capability list.
80 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
82 #define MAX_PCIE_CAPABILITY 48
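/*
 * The bound of 48 appears to come from the config space layout: a
 * 256-byte config space minus the 64-byte standard header leaves room
 * for at most (256 - 64) / 4 capabilities at the minimum 4-byte
 * spacing, and it keeps a malformed (looping) list from hanging us.
 */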
89 ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
91 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
95 if (!(status & PCI_STATUS_CAP_LIST))
98 ttl = MAX_PCIE_CAPABILITY;
99 ret = rte_pci_read_config(device, &pos, sizeof(pos),
100 PCI_CAPABILITY_LIST);
102 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
103 PCI_CAPABILITY_LIST);
107 while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
108 ret = rte_pci_read_config(device, &id, sizeof(id),
109 (pos + PCI_CAP_LIST_ID));
111 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
112 (pos + PCI_CAP_LIST_ID));
122 ret = rte_pci_read_config(device, &pos, sizeof(pos),
123 (pos + PCI_CAP_LIST_NEXT));
125 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
126 (pos + PCI_CAP_LIST_NEXT));
134 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
140 pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
142 ret = rte_pci_read_config(device, &control, sizeof(control),
143 (pos + PCI_MSIX_FLAGS));
145 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
146 (pos + PCI_MSIX_FLAGS));
151 control |= PCI_MSIX_FLAGS_ENABLE;
153 control &= ~PCI_MSIX_FLAGS_ENABLE;
154 ret = rte_pci_write_config(device, &control, sizeof(control),
155 (pos + PCI_MSIX_FLAGS));
157 PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
158 (pos + PCI_MSIX_FLAGS));
169 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
171 /* The MAC address has been checked by the upper-level interface */
172 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
175 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
176 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
177 RTE_ETHER_ADDR_LEN, false, NULL, 0);
179 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
181 hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
188 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
190 /* The MAC address has been checked by the upper-level interface */
191 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
194 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
195 HNS3_MBX_MAC_VLAN_UC_REMOVE,
196 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
199 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
201 hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
208 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
209 struct rte_ether_addr *mac_addr)
211 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
212 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
213 struct rte_ether_addr *old_addr;
214 uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
215 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
219 * The rte layer of the DPDK framework guarantees that the input
220 * parameter mac_addr is a valid address.
222 old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
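/*
 * The UC_MODIFY mailbox built below carries both addresses back to
 * back: bytes 0-5 hold the new MAC address and bytes 6-11 the old
 * one, presumably so the PF can locate the existing entry to replace.
 */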
223 rte_spinlock_lock(&hw->lock);
224 memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
225 memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
228 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
229 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
230 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
233 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
234 * driver. When the user has configured a MAC address for the VF
235 * device with the "ip link set ..." command on the PF device,
236 * the hns3 PF kernel ethdev driver does not allow the VF driver
237 * to reconfigure a different default MAC address, and returns
238 * -EPERM to the VF driver through the mailbox.
241 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
243 hns3_warn(hw, "Has permanent mac addr(%s) for vf",
246 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
248 hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
253 rte_ether_addr_copy(mac_addr,
254 (struct rte_ether_addr *)hw->mac.mac_addr);
255 rte_spinlock_unlock(&hw->lock);
261 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
262 struct rte_ether_addr *mac_addr)
264 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
267 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
268 HNS3_MBX_MAC_VLAN_MC_ADD,
269 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
272 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
274 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
282 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
283 struct rte_ether_addr *mac_addr)
285 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
288 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
289 HNS3_MBX_MAC_VLAN_MC_REMOVE,
290 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
293 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
295 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
303 hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
304 struct rte_ether_addr *mc_addr_set,
307 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
308 struct rte_ether_addr *addr;
312 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
313 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
314 "invalid. valid range: 0~%d",
315 nb_mc_addr, HNS3_MC_MACADDR_NUM);
319 /* Check if input mac addresses are valid */
320 for (i = 0; i < nb_mc_addr; i++) {
321 addr = &mc_addr_set[i];
322 if (!rte_is_multicast_ether_addr(addr)) {
323 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
326 "failed to set mc mac addr, addr(%s) invalid.",
331 /* Check if there are duplicate addresses */
332 for (j = i + 1; j < nb_mc_addr; j++) {
333 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
334 hns3_ether_format_addr(mac_str,
335 RTE_ETHER_ADDR_FMT_SIZE,
337 hns3_err(hw, "failed to set mc mac addr, "
338 "addrs invalid. duplicate addr(%s).",
345 * Check if there are duplicate addresses between mac_addrs
348 for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) {
349 if (rte_is_same_ether_addr(addr,
350 &hw->data->mac_addrs[j])) {
351 hns3_ether_format_addr(mac_str,
352 RTE_ETHER_ADDR_FMT_SIZE,
354 hns3_err(hw, "failed to set mc mac addr, "
355 "addrs invalid. addr(%s) has already been "
356 "configured via the mac_addr add API",
367 hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
368 struct rte_ether_addr *mc_addr_set,
371 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
372 struct rte_ether_addr *addr;
379 ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
383 rte_spinlock_lock(&hw->lock);
384 cur_addr_num = hw->mc_addrs_num;
385 for (i = 0; i < cur_addr_num; i++) {
386 num = cur_addr_num - i - 1;
387 addr = &hw->mc_addrs[num];
388 ret = hw->ops.del_mc_mac_addr(hw, addr);
390 rte_spinlock_unlock(&hw->lock);
397 set_addr_num = (int)nb_mc_addr;
398 for (i = 0; i < set_addr_num; i++) {
399 addr = &mc_addr_set[i];
400 ret = hw->ops.add_mc_mac_addr(hw, addr);
402 rte_spinlock_unlock(&hw->lock);
406 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
409 rte_spinlock_unlock(&hw->lock);
415 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
416 bool en_uc_pmc, bool en_mc_pmc)
418 struct hns3_mbx_vf_to_pf_cmd *req;
419 struct hns3_cmd_desc desc;
422 req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
425 * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
426 * so promiscuous/allmulticast mode in the hns3 VF PMD driver behaves
427 * as follows:
428 * 1. The promiscuous/allmulticast mode can be configured successfully
429 *    only on a trusted VF device. Configuring promiscuous/allmulticast
430 *    mode on a non-trusted VF device will fail.
431 *    The hns3 VF device can be configured as a trusted device by the
432 *    hns3 PF kernel ethdev driver on the host with the command:
433 *      "ip link set <eth num> vf <vf id> trust on"
434 * 2. After promiscuous mode is configured successfully, the hns3 VF PMD
435 *    driver can receive both ingress and outgoing traffic. In other
436 *    words, all ingress packets, plus all packets sent from the PF and
437 *    the other VFs on the same physical port.
438 * 3. Note: because of hardware constraints, the vlan filter is enabled
439 *    by default and cannot be turned off on a VF device, so the vlan
440 *    filter is still effective even in promiscuous mode. If upper
441 *    applications don't call the rte_eth_dev_vlan_filter API to set a
442 *    vlan on the VF device, the hns3 VF PMD driver cannot receive
443 *    packets with a vlan tag in promiscuous mode.
445 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
446 req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
447 req->msg[1] = en_bc_pmc ? 1 : 0;
448 req->msg[2] = en_uc_pmc ? 1 : 0;
449 req->msg[3] = en_mc_pmc ? 1 : 0;
450 req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
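/*
 * Layout of the mailbox message built above: msg[0] carries the
 * opcode, msg[1]-msg[3] the broadcast/unicast/multicast enables, and
 * msg[4] tells the PF whether limited promiscuous mode is in use.
 */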
452 ret = hns3_cmd_send(hw, &desc, 1);
454 hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
460 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
462 struct hns3_adapter *hns = dev->data->dev_private;
463 struct hns3_hw *hw = &hns->hw;
466 ret = hns3vf_set_promisc_mode(hw, true, true, true);
468 hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
474 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
476 bool allmulti = dev->data->all_multicast ? true : false;
477 struct hns3_adapter *hns = dev->data->dev_private;
478 struct hns3_hw *hw = &hns->hw;
481 ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
483 hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
489 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
491 struct hns3_adapter *hns = dev->data->dev_private;
492 struct hns3_hw *hw = &hns->hw;
495 if (dev->data->promiscuous)
498 ret = hns3vf_set_promisc_mode(hw, true, false, true);
500 hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
506 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
508 struct hns3_adapter *hns = dev->data->dev_private;
509 struct hns3_hw *hw = &hns->hw;
512 if (dev->data->promiscuous)
515 ret = hns3vf_set_promisc_mode(hw, true, false, false);
517 hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
523 hns3vf_restore_promisc(struct hns3_adapter *hns)
525 struct hns3_hw *hw = &hns->hw;
526 bool allmulti = hw->data->all_multicast ? true : false;
528 if (hw->data->promiscuous)
529 return hns3vf_set_promisc_mode(hw, true, true, true);
531 return hns3vf_set_promisc_mode(hw, true, false, allmulti);
535 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
536 bool mmap, enum hns3_ring_type queue_type,
539 struct hns3_vf_bind_vector_msg bind_msg;
544 memset(&bind_msg, 0, sizeof(bind_msg));
545 code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
546 HNS3_MBX_UNMAP_RING_TO_VECTOR;
547 bind_msg.vector_id = vector_id;
549 if (queue_type == HNS3_RING_TYPE_RX)
550 bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
552 bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
554 bind_msg.param[0].ring_type = queue_type;
555 bind_msg.ring_num = 1;
556 bind_msg.param[0].tqp_index = queue_id;
557 op_str = mmap ? "Map" : "Unmap";
558 ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
559 sizeof(bind_msg), false, NULL, 0);
561 hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
562 op_str, queue_id, bind_msg.vector_id, ret);
568 hns3vf_init_ring_with_vector(struct hns3_hw *hw)
575 * In the hns3 network engine, vector 0 is always the misc interrupt of
576 * this function, and vectors 1~N can be used for the queues of the
577 * function. Tx and Rx queues with the same number share an interrupt
578 * vector. During initialization, all hardware mappings between queues
579 * and interrupt vectors need to be cleared, so that errors caused by
580 * residual configurations, such as an unexpected Tx interrupt, can be
581 * avoided.
583 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
584 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
585 vec = vec - 1; /* the last interrupt is reserved */
586 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
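/*
 * Illustrative example: with num_msi = 17 in the
 * HNS3_INTR_MAPPING_VEC_RSV_ONE mode, vec = 17 - 1 - 1 = 15, so at
 * most min(15, tqps_num) queue pairs get their own interrupt vector.
 */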
587 for (i = 0; i < hw->intr_tqps_num; i++) {
589 * Set the gap limiter/rate limiter/quantity limiter algorithm
590 * configuration for interrupt coalescing of the queue's interrupt.
592 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
593 HNS3_TQP_INTR_GL_DEFAULT);
594 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
595 HNS3_TQP_INTR_GL_DEFAULT);
596 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
598 * QL(quantity limiter) is not used currently, just set 0 to
601 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
603 ret = hns3vf_bind_ring_with_vector(hw, vec, false,
604 HNS3_RING_TYPE_TX, i);
606 PMD_INIT_LOG(ERR, "VF failed to unbind TX ring(%d) with "
607 "vector: %u, ret=%d", i, vec, ret);
611 ret = hns3vf_bind_ring_with_vector(hw, vec, false,
612 HNS3_RING_TYPE_RX, i);
614 PMD_INIT_LOG(ERR, "VF failed to unbind RX ring(%d) with "
615 "vector: %u, ret=%d", i, vec, ret);
624 hns3vf_dev_configure(struct rte_eth_dev *dev)
626 struct hns3_adapter *hns = dev->data->dev_private;
627 struct hns3_hw *hw = &hns->hw;
628 struct rte_eth_conf *conf = &dev->data->dev_conf;
629 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
630 uint16_t nb_rx_q = dev->data->nb_rx_queues;
631 uint16_t nb_tx_q = dev->data->nb_tx_queues;
632 struct rte_eth_rss_conf rss_conf;
636 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
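/*
 * Illustrative example: with nb_rx_q = 8 and nb_tx_q = 4 on hardware
 * without independent queue control, four fake Tx queues are set up
 * below so that both directions expose eight queues to the engine.
 */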
639 * Some versions of the hardware network engine do not support
640 * individually enabling/disabling/resetting the Tx or Rx queue. These
641 * devices must enable/disable/reset Tx and Rx queues at the same time.
642 * When the number of Tx queues allocated by upper applications is not
643 * equal to the number of Rx queues, the driver needs to set up fake Tx
644 * or Rx queues to adjust the numbers of Tx/Rx queues; otherwise, the
645 * network engine cannot work as usual. These fake queues are invisible
646 * to upper applications and cannot be used by them.
648 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
650 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
651 hw->cfg_max_queues = 0;
655 hw->adapter_state = HNS3_NIC_CONFIGURING;
656 if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
657 hns3_err(hw, "setting link speed/duplex not supported");
662 /* When RSS is not configured, packets are redirected to queue 0 */
663 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
664 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
665 hw->rss_dis_flag = false;
666 rss_conf = conf->rx_adv_conf.rss_conf;
667 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
672 ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
676 ret = hns3vf_dev_configure_vlan(dev);
680 /* config hardware GRO */
681 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
682 ret = hns3_config_gro(hw, gro_en);
686 hns3_init_rx_ptype_tble(dev);
688 hw->adapter_state = HNS3_NIC_CONFIGURED;
692 hw->cfg_max_queues = 0;
693 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
694 hw->adapter_state = HNS3_NIC_INITIALIZED;
700 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
704 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
705 sizeof(mtu), true, NULL, 0);
707 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
713 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
715 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
716 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
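/*
 * frame_size is the on-wire frame length for the requested MTU;
 * HNS3_ETH_OVERHEAD is assumed to cover the Ethernet header, CRC and
 * VLAN tag space added on top of the L3 MTU.
 */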
720 * The hns3 PF/VF devices on the same port share the hardware MTU
721 * configuration. Currently, the hns3 VF PMD driver sends a mailbox
722 * message to ask the hns3 PF kernel ethdev driver to finish the
723 * hardware MTU configuration, so there is no need to stop the port for
724 * the hns3 VF device, and the MTU value issued must be less than or equal to
727 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
728 hns3_err(hw, "Failed to set mtu during resetting");
733 * When Rx of scattered packets is off, the hns3 PMD driver may use
734 * the vector Rx process function or the simple Rx functions. If the
735 * MTU is increased and the maximum length of received packets becomes
736 * greater than the length of an Rx packet buffer, the hardware network
737 * engine needs multiple BDs and buffers to store these packets. This
738 * causes problems when the vector or simple Rx function is still used
739 * to receive packets. So, when Rx of scattered packets is off and the
740 * device is started, it is not permitted to increase the MTU such that
741 * the maximum length of Rx packets is greater than the Rx buffer
742 * length.
744 if (dev->data->dev_started && !dev->data->scattered_rx &&
745 frame_size > hw->rx_buf_len) {
746 hns3_err(hw, "failed to set mtu because the device is started "
747 "and scattered rx is not enabled");
751 rte_spinlock_lock(&hw->lock);
752 ret = hns3vf_config_mtu(hw, mtu);
754 rte_spinlock_unlock(&hw->lock);
757 rte_spinlock_unlock(&hw->lock);
763 hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
765 struct hns3_adapter *hns = eth_dev->data->dev_private;
766 struct hns3_hw *hw = &hns->hw;
767 uint16_t q_num = hw->tqps_num;
770 * In interrupt mode, 'max_rx_queues' is set based on the number of
771 * MSI-X interrupt resources of the hardware.
773 if (hw->data->dev_conf.intr_conf.rxq == 1)
774 q_num = hw->intr_tqps_num;
776 info->max_rx_queues = q_num;
777 info->max_tx_queues = hw->tqps_num;
778 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
779 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
780 info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
781 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
782 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
784 info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
785 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
786 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
787 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
788 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
789 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
790 RTE_ETH_RX_OFFLOAD_SCATTER |
791 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
792 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
793 RTE_ETH_RX_OFFLOAD_RSS_HASH |
794 RTE_ETH_RX_OFFLOAD_TCP_LRO);
795 info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
796 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
797 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
798 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
799 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
800 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
801 RTE_ETH_TX_OFFLOAD_TCP_TSO |
802 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
803 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
804 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
805 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
806 hns3_txvlan_cap_get(hw));
808 if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
809 info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
811 if (hns3_dev_get_support(hw, INDEP_TXRX))
812 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
813 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
815 info->rx_desc_lim = (struct rte_eth_desc_lim) {
816 .nb_max = HNS3_MAX_RING_DESC,
817 .nb_min = HNS3_MIN_RING_DESC,
818 .nb_align = HNS3_ALIGN_RING_DESC,
821 info->tx_desc_lim = (struct rte_eth_desc_lim) {
822 .nb_max = HNS3_MAX_RING_DESC,
823 .nb_min = HNS3_MIN_RING_DESC,
824 .nb_align = HNS3_ALIGN_RING_DESC,
825 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
826 .nb_mtu_seg_max = hw->max_non_tso_bd_num,
829 info->default_rxconf = (struct rte_eth_rxconf) {
830 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
832 * If there are no available Rx buffer descriptors, incoming
833 * packets are always dropped by hardware based on hns3 network
839 info->default_txconf = (struct rte_eth_txconf) {
840 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
844 info->reta_size = hw->rss_ind_tbl_size;
845 info->hash_key_size = HNS3_RSS_KEY_SIZE;
846 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
848 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
849 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
850 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
851 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
852 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
853 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
859 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
861 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
865 hns3vf_disable_irq0(struct hns3_hw *hw)
867 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
871 hns3vf_enable_irq0(struct hns3_hw *hw)
873 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
876 static enum hns3vf_evt_cause
877 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
879 struct hns3_hw *hw = &hns->hw;
880 enum hns3vf_evt_cause ret;
881 uint32_t cmdq_stat_reg;
882 uint32_t rst_ing_reg;
885 /* Fetch the events from their corresponding regs */
886 cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
887 if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
888 rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
889 hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
890 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
891 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
892 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
893 hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
894 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
896 hw->reset.stats.global_cnt++;
897 hns3_warn(hw, "Global reset detected, clear reset status");
899 hns3_schedule_delayed_reset(hns);
900 hns3_warn(hw, "Global reset detected, don't clear reset status");
903 ret = HNS3VF_VECTOR0_EVENT_RST;
907 /* Check for vector0 mailbox(=CMDQ RX) event source */
908 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
909 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
910 ret = HNS3VF_VECTOR0_EVENT_MBX;
915 ret = HNS3VF_VECTOR0_EVENT_OTHER;
923 hns3vf_interrupt_handler(void *param)
925 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
926 struct hns3_adapter *hns = dev->data->dev_private;
927 struct hns3_hw *hw = &hns->hw;
928 enum hns3vf_evt_cause event_cause;
931 /* Disable interrupt */
932 hns3vf_disable_irq0(hw);
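/*
 * Writing 0/1 to HNS3_MISC_VECTOR_REG_BASE masks/unmasks vector 0
 * (see hns3vf_disable_irq0/hns3vf_enable_irq0), so no new events are
 * delivered while the causes are read and cleared below.
 */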
934 /* Read out interrupt causes */
935 event_cause = hns3vf_check_event_cause(hns, &clearval);
936 /* Clear interrupt causes */
937 hns3vf_clear_event_cause(hw, clearval);
939 switch (event_cause) {
940 case HNS3VF_VECTOR0_EVENT_RST:
941 hns3_schedule_reset(hns);
943 case HNS3VF_VECTOR0_EVENT_MBX:
944 hns3_dev_handle_mbx_msg(hw);
950 /* Enable interrupt */
951 hns3vf_enable_irq0(hw);
955 hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
957 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
958 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
959 hw->rss_key_size = HNS3_RSS_KEY_SIZE;
960 hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
964 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
966 struct hns3_dev_specs_0_cmd *req0;
968 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
970 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
971 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
972 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
973 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
977 hns3vf_check_dev_specifications(struct hns3_hw *hw)
979 if (hw->rss_ind_tbl_size == 0 ||
980 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
981 hns3_warn(hw, "the size of the hash lookup table configured (%u)"
982 " is invalid. valid range: 1~%u", hw->rss_ind_tbl_size,
983 HNS3_RSS_IND_TBL_SIZE_MAX);
991 hns3vf_query_dev_specifications(struct hns3_hw *hw)
993 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
997 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
998 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
1000 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1002 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
1004 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
1008 hns3vf_parse_dev_specifications(hw, desc);
1010 return hns3vf_check_dev_specifications(hw);
1014 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
1016 uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
1017 HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
1018 uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
1019 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
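/*
 * Only the first transition out of HNS3_PF_PUSH_LSC_CAP_UNKNOWN is
 * recorded: the compare-and-exchange below succeeds only while the
 * capability is still unknown, so a later, contradictory answer
 * cannot flip an already-latched capability.
 */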
1021 if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
1022 __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
1023 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
1027 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
1029 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS 500
1031 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1032 int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
1033 uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
1034 uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
1035 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1037 __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
1040 (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1043 while (remain_ms > 0) {
1044 rte_delay_ms(HNS3_POLL_RESPONE_MS);
1045 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
1046 HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
1052 * On exit from the above loop, pf_push_lsc_cap can be in one of three
1053 * states: unknown (the PF has not acked), not_supported, or supported.
1054 * Configure it as 'not_supported' when it is still in 'unknown' state.
1056 __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
1057 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
1059 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
1060 HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
1061 hns3_info(hw, "detected that the PF supports push link status change!");
1064 * The framework already set the RTE_ETH_DEV_INTR_LSC bit because the
1065 * driver declared RTE_PCI_DRV_INTR_LSC in drv_flags. So clear the
1066 * RTE_ETH_DEV_INTR_LSC capability here.
1068 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1073 hns3vf_get_capability(struct hns3_hw *hw)
1075 struct rte_pci_device *pci_dev;
1076 struct rte_eth_dev *eth_dev;
1080 eth_dev = &rte_eth_devices[hw->data->port_id];
1081 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1083 /* Get PCI revision id */
1084 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
1085 HNS3_PCI_REVISION_ID);
1086 if (ret != HNS3_PCI_REVISION_ID_LEN) {
1087 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
1091 hw->revision = revision;
1093 if (revision < PCI_REVISION_ID_HIP09_A) {
1094 hns3vf_set_default_dev_specifications(hw);
1095 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
1096 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
1097 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
1098 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
1099 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
1100 hw->rss_info.ipv6_sctp_offload_supported = false;
1101 hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
1105 ret = hns3vf_query_dev_specifications(hw);
1108 "failed to query dev specifications, ret = %d",
1113 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
1114 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
1115 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
1116 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
1117 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
1118 hw->rss_info.ipv6_sctp_offload_supported = true;
1119 hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
1125 hns3vf_check_tqp_info(struct hns3_hw *hw)
1127 if (hw->tqps_num == 0) {
1128 PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
1132 if (hw->rss_size_max == 0) {
1133 PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
1137 hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
1143 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
1148 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1149 HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
1150 true, &resp_msg, sizeof(resp_msg));
1152 if (ret == -ETIME) {
1154 * Getting the current port based VLAN state from the PF
1155 * driver does not affect the VF driver's basic function.
1156 * Because the VF driver relies on the hns3 PF kernel
1157 * ethdev driver, no failure is returned when the return
1158 * value is ETIME, to avoid introducing compatibility
1159 * issues with older versions of the PF driver. This
1160 * return value covers the following scenarios:
1161 * 1) the firmware didn't return the results in time
1162 * 2) the result returned by the firmware timed out
1163 * 3) an older version of the kernel side PF driver does
1164 *    not support this mailbox message.
1165 * For scenarios 1 and 2, it is most likely that a
1166 * hardware error or a hardware reset has occurred. In
1167 * this case, these errors will be caught by other
1168 * functions.
1170 PMD_INIT_LOG(WARNING,
1171 "failed to get PVID state due to timeout, maybe "
1172 "the kernel side PF driver doesn't support this "
1173 "mailbox message, or the firmware didn't respond.");
1174 resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
1176 PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
1181 hw->port_base_vlan_cfg.state = resp_msg ?
1182 HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
1187 hns3vf_get_queue_info(struct hns3_hw *hw)
1189 #define HNS3VF_TQPS_RSS_INFO_LEN 6
1190 uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
1193 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
1194 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
1196 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
1200 memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
1201 memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
1203 return hns3vf_check_tqp_info(hw);
1207 hns3vf_get_queue_depth(struct hns3_hw *hw)
1209 #define HNS3VF_TQPS_DEPTH_INFO_LEN 4
1210 uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
1213 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
1214 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
1216 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
1221 memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
1222 memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
1228 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
1230 if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
1231 hns3_set_bit(hw->capability,
1232 HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
1236 hns3vf_get_num_tc(struct hns3_hw *hw)
1241 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1242 if (hw->hw_tc_map & BIT(i))
1249 hns3vf_get_basic_info(struct hns3_hw *hw)
1251 uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
1252 struct hns3_basic_info *basic_info;
1255 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
1256 true, resp_msg, sizeof(resp_msg));
1258 hns3_err(hw, "failed to get basic info from PF, ret = %d.",
1263 basic_info = (struct hns3_basic_info *)resp_msg;
1264 hw->hw_tc_map = basic_info->hw_tc_map;
1265 hw->num_tc = hns3vf_get_num_tc(hw);
1266 hw->pf_vf_if_version = basic_info->pf_vf_if_version;
1267 hns3vf_update_caps(hw, basic_info->caps);
1273 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
1275 uint8_t host_mac[RTE_ETHER_ADDR_LEN];
1278 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
1279 true, host_mac, RTE_ETHER_ADDR_LEN);
1281 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
1285 memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
1291 hns3vf_get_configuration(struct hns3_hw *hw)
1295 hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1296 hw->rss_dis_flag = false;
1298 /* Get device capability */
1299 ret = hns3vf_get_capability(hw);
1301 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1305 hns3vf_get_push_lsc_cap(hw);
1307 /* Get basic info from PF */
1308 ret = hns3vf_get_basic_info(hw);
1312 /* Get queue configuration from PF */
1313 ret = hns3vf_get_queue_info(hw);
1317 /* Get queue depth info from PF */
1318 ret = hns3vf_get_queue_depth(hw);
1322 /* Get user defined VF MAC addr from PF */
1323 ret = hns3vf_get_host_mac_addr(hw);
1327 return hns3vf_get_port_base_vlan_filter_state(hw);
1331 hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
1334 struct hns3_hw *hw = &hns->hw;
1336 return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1340 hns3vf_request_link_info(struct hns3_hw *hw)
1342 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1346 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1349 send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1350 vf->req_link_info_cnt > 0;
1354 ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1357 hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1361 if (vf->req_link_info_cnt > 0)
1362 vf->req_link_info_cnt--;
1366 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1367 uint32_t link_speed, uint8_t link_duplex)
1369 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1370 struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1371 struct hns3_mac *mac = &hw->mac;
1375 * The PF kernel driver may push the link status while the VF driver is
1376 * resetting. In this case the driver stops the polling job, and starts
1377 * it again after the reset is done.
1378 * Once the polling job has started, the driver gets the initial link
1379 * status by sending a request to the PF kernel driver, and then updates
1380 * the link status by processing the PF kernel driver's mailbox messages.
1382 if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1385 if (hw->adapter_state != HNS3_NIC_STARTED)
1388 mac->link_status = link_status;
1389 mac->link_speed = link_speed;
1390 mac->link_duplex = link_duplex;
1391 ret = hns3vf_dev_link_update(dev, 0);
1392 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1393 hns3_start_report_lse(dev);
1397 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1399 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1400 struct hns3_hw *hw = &hns->hw;
1401 uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1402 uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1403 uint8_t is_kill = on ? 0 : 1;
1405 msg_data[0] = is_kill;
1406 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1407 memcpy(&msg_data[3], &proto, sizeof(proto));
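/*
 * The 5-byte payload assembled above: byte 0 is the is_kill flag
 * (0 = add, 1 = delete), bytes 1-2 the VLAN id, and bytes 3-4 the
 * protocol (0x8100, RTE_ETHER_TYPE_VLAN) in network byte order.
 */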
1409 return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1410 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1415 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1417 struct hns3_adapter *hns = dev->data->dev_private;
1418 struct hns3_hw *hw = &hns->hw;
1421 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1423 "vf set vlan id failed during resetting, vlan_id =%u",
1427 rte_spinlock_lock(&hw->lock);
1428 ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1429 rte_spinlock_unlock(&hw->lock);
1431 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1438 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1443 if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1446 msg_data = enable ? 1 : 0;
1447 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1448 HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
1449 sizeof(msg_data), true, NULL, 0);
1451 hns3_err(hw, "%s vlan filter failed, ret = %d.",
1452 enable ? "enable" : "disable", ret);
1458 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1463 msg_data = enable ? 1 : 0;
1464 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1465 &msg_data, sizeof(msg_data), false, NULL, 0);
1467 hns3_err(hw, "vf %s strip failed, ret = %d.",
1468 enable ? "enable" : "disable", ret);
1474 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1476 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1477 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1478 unsigned int tmp_mask;
1481 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1482 hns3_err(hw, "vf set vlan offload failed during resetting, "
1483 "mask = 0x%x", mask);
1487 tmp_mask = (unsigned int)mask;
1489 if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1490 rte_spinlock_lock(&hw->lock);
1491 /* Enable or disable VLAN filter */
1492 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1493 ret = hns3vf_en_vlan_filter(hw, true);
1495 ret = hns3vf_en_vlan_filter(hw, false);
1496 rte_spinlock_unlock(&hw->lock);
1501 /* Vlan stripping setting */
1502 if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1503 rte_spinlock_lock(&hw->lock);
1504 /* Enable or disable VLAN stripping */
1505 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1506 ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1508 ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1509 rte_spinlock_unlock(&hw->lock);
1516 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1518 struct rte_vlan_filter_conf *vfc;
1519 struct hns3_hw *hw = &hns->hw;
1526 vfc = &hw->data->vlan_filter_conf;
1527 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1528 if (vfc->ids[i] == 0)
1533 * 64 means the num bits of ids, one bit corresponds to
1537 /* mask of the trailing zeroes (bits below the lowest set bit) */
1538 vbit = ~ids & (ids - 1);
1539 /* clear least significant bit set */
1540 ids ^= (ids ^ (ids - 1)) ^ vbit;
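/*
 * Worked example (illustrative): for ids = 0b101000, ids - 1 =
 * 0b100111, so vbit = ~ids & (ids - 1) = 0b000111 masks the three
 * trailing zeroes, and the update above clears the lowest set bit,
 * leaving ids = 0b100000 for the next iteration.
 */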
1545 ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1548 "VF handle vlan table failed, ret =%d, on = %d",
1559 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1561 return hns3vf_handle_all_vlan_table(hns, 0);
1565 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1567 struct hns3_hw *hw = &hns->hw;
1568 struct rte_eth_conf *dev_conf;
1572 dev_conf = &hw->data->dev_conf;
1573 en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1575 ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1577 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1583 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1585 struct hns3_adapter *hns = dev->data->dev_private;
1586 struct rte_eth_dev_data *data = dev->data;
1587 struct hns3_hw *hw = &hns->hw;
1590 if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1591 data->dev_conf.txmode.hw_vlan_reject_untagged ||
1592 data->dev_conf.txmode.hw_vlan_insert_pvid) {
1593 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1594 "or hw_vlan_insert_pvid is not supported!");
1597 /* Apply vlan offload setting */
1598 ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1599 RTE_ETH_VLAN_FILTER_MASK);
1601 hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1607 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1611 msg_data = alive ? 1 : 0;
1612 return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1613 sizeof(msg_data), false, NULL, 0);
1617 hns3vf_keep_alive_handler(void *param)
1619 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1620 struct hns3_adapter *hns = eth_dev->data->dev_private;
1621 struct hns3_hw *hw = &hns->hw;
1624 ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1627 hns3_err(hw, "VF failed to send keep alive cmd(=%d)",
1630 rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1635 hns3vf_service_handler(void *param)
1637 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1638 struct hns3_adapter *hns = eth_dev->data->dev_private;
1639 struct hns3_hw *hw = &hns->hw;
1642 * The link status query and reset processing are executed in the
1643 * interrupt thread. When an IMP reset occurs, the IMP will not respond,
1644 * and the query operation will time out after 30ms. In the case of
1645 * multiple PF/VFs, each query failure timeout causes the IMP reset
1646 * interrupt to fail to be serviced within 100ms.
1647 * Before querying the link status, check whether there is a reset
1648 * pending, and if so, abandon the query.
1650 if (!hns3vf_is_reset_pending(hns))
1651 hns3vf_request_link_info(hw);
1653 hns3_warn(hw, "Cancel the query when reset is pending");
1655 rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1660 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1662 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT 3
1664 struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1666 if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1667 vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1669 __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1671 hns3vf_service_handler(dev);
1675 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1677 struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1679 rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1681 __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1685 hns3_query_vf_resource(struct hns3_hw *hw)
1687 struct hns3_vf_res_cmd *req;
1688 struct hns3_cmd_desc desc;
1692 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1693 ret = hns3_cmd_send(hw, &desc, 1);
1695 hns3_err(hw, "query vf resource failed, ret = %d", ret);
1699 req = (struct hns3_vf_res_cmd *)desc.data;
1700 num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1701 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1702 if (num_msi < HNS3_MIN_VECTOR_NUM) {
1703 hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1704 num_msi, HNS3_MIN_VECTOR_NUM);
1708 hw->num_msi = num_msi;
1714 hns3vf_init_hardware(struct hns3_adapter *hns)
1716 struct hns3_hw *hw = &hns->hw;
1717 uint16_t mtu = hw->data->mtu;
1720 ret = hns3vf_set_promisc_mode(hw, true, false, false);
1724 ret = hns3vf_config_mtu(hw, mtu);
1726 goto err_init_hardware;
1728 ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1730 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1731 goto err_init_hardware;
1734 ret = hns3_config_gro(hw, false);
1736 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1737 goto err_init_hardware;
1741 * During initialization, all hardware mappings between queues and
1742 * interrupt vectors need to be cleared, so that errors caused by
1743 * residual configurations, such as an unexpected interrupt, can be
1744 * avoided.
1746 ret = hns3vf_init_ring_with_vector(hw);
1748 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1749 goto err_init_hardware;
1755 (void)hns3vf_set_promisc_mode(hw, false, false, false);
1760 hns3vf_clear_vport_list(struct hns3_hw *hw)
1762 return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1763 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1768 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1770 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1771 struct hns3_adapter *hns = eth_dev->data->dev_private;
1772 struct hns3_hw *hw = &hns->hw;
1775 PMD_INIT_FUNC_TRACE();
1777 /* Get hardware io base address from pcie BAR2 IO space */
1778 hw->io_base = pci_dev->mem_resource[2].addr;
1780 /* Firmware command queue initialize */
1781 ret = hns3_cmd_init_queue(hw);
1783 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1784 goto err_cmd_init_queue;
1787 /* Firmware command initialize */
1788 ret = hns3_cmd_init(hw);
1790 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1794 hns3_tx_push_init(eth_dev);
1796 /* Get VF resource */
1797 ret = hns3_query_vf_resource(hw);
1801 rte_spinlock_init(&hw->mbx_resp.lock);
1803 hns3vf_clear_event_cause(hw, 0);
1805 ret = rte_intr_callback_register(pci_dev->intr_handle,
1806 hns3vf_interrupt_handler, eth_dev);
1808 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1809 goto err_intr_callback_register;
1812 /* Enable interrupt */
1813 rte_intr_enable(pci_dev->intr_handle);
1814 hns3vf_enable_irq0(hw);
1816 /* Get configuration from PF */
1817 ret = hns3vf_get_configuration(hw);
1819 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1820 goto err_get_config;
1823 ret = hns3_tqp_stats_init(hw);
1825 goto err_get_config;
1827 /* Clear the hardware statistics of the imissed registers. */
1828 ret = hns3_update_imissed_stats(hw, true);
1830 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1831 goto err_set_tc_queue;
1834 ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
1836 PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1837 goto err_set_tc_queue;
1840 ret = hns3vf_clear_vport_list(hw);
1842 PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1843 goto err_set_tc_queue;
1846 ret = hns3vf_init_hardware(hns);
1848 goto err_set_tc_queue;
1850 hns3_rss_set_default_args(hw);
1852 ret = hns3vf_set_alive(hw, true);
1854 PMD_INIT_LOG(ERR, "VF failed to send alive to PF: %d", ret);
1855 goto err_set_tc_queue;
1861 hns3_tqp_stats_uninit(hw);
1864 hns3vf_disable_irq0(hw);
1865 rte_intr_disable(pci_dev->intr_handle);
1866 hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1868 err_intr_callback_register:
1870 hns3_cmd_uninit(hw);
1871 hns3_cmd_destroy_queue(hw);
1879 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1881 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1882 struct hns3_adapter *hns = eth_dev->data->dev_private;
1883 struct hns3_hw *hw = &hns->hw;
1885 PMD_INIT_FUNC_TRACE();
1887 hns3_rss_uninit(hns);
1888 (void)hns3_config_gro(hw, false);
1889 (void)hns3vf_set_alive(hw, false);
1890 (void)hns3vf_set_promisc_mode(hw, false, false, false);
1891 hns3_flow_uninit(eth_dev);
1892 hns3_tqp_stats_uninit(hw);
1893 hns3vf_disable_irq0(hw);
1894 rte_intr_disable(pci_dev->intr_handle);
1895 hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1897 hns3_cmd_uninit(hw);
1898 hns3_cmd_destroy_queue(hw);
1903 hns3vf_do_stop(struct hns3_adapter *hns)
1905 struct hns3_hw *hw = &hns->hw;
1908 hw->mac.link_status = RTE_ETH_LINK_DOWN;
1911 * The "hns3vf_do_stop" function will also be called by .stop_service to
1912 * prepare a reset. At the time of a global or IMP reset, the command
1913 * cannot be sent to stop the tx/rx queues. The mbufs in the Tx/Rx queues
1914 * may be accessed during the reset process. So the mbufs cannot be
1915 * released during the reset and must be released after the reset is
1918 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
1919 hns3_dev_release_mbufs(hns);
1921 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1922 hns3_configure_all_mac_addr(hns, true);
1923 ret = hns3_reset_all_tqps(hns);
1925 hns3_err(hw, "failed to reset all queues ret = %d",
1934 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
1936 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1937 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1938 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1939 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
1940 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
1943 if (dev->data->dev_conf.intr_conf.rxq == 0)
1946 /* unmap the ring with vector */
1947 if (rte_intr_allow_others(intr_handle)) {
1948 vec = RTE_INTR_VEC_RXTX_OFFSET;
1949 base = RTE_INTR_VEC_RXTX_OFFSET;
1951 if (rte_intr_dp_is_en(intr_handle)) {
1952 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1953 (void)hns3vf_bind_ring_with_vector(hw, vec, false,
1956 if (vec < base + rte_intr_nb_efd_get(intr_handle)
1961 /* Clean datapath event and queue/vec mapping */
1962 rte_intr_efd_disable(intr_handle);
1964 /* Cleanup vector list */
1965 rte_intr_vec_list_free(intr_handle);
1969 hns3vf_dev_stop(struct rte_eth_dev *dev)
1971 struct hns3_adapter *hns = dev->data->dev_private;
1972 struct hns3_hw *hw = &hns->hw;
1974 PMD_INIT_FUNC_TRACE();
1975 dev->data->dev_started = 0;
1977 hw->adapter_state = HNS3_NIC_STOPPING;
1978 hns3_set_rxtx_function(dev);
1980 /* Disable datapath on secondary process. */
1981 hns3_mp_req_stop_rxtx(dev);
1982 /* Prevent crashes when queues are still in use. */
1983 rte_delay_ms(hw->cfg_max_queues);
1985 rte_spinlock_lock(&hw->lock);
1986 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1988 hns3vf_do_stop(hns);
1989 hns3vf_unmap_rx_interrupt(dev);
1990 hw->adapter_state = HNS3_NIC_CONFIGURED;
1992 hns3_rx_scattered_reset(dev);
1993 hns3vf_stop_poll_job(dev);
1994 hns3_stop_report_lse(dev);
1995 rte_spinlock_unlock(&hw->lock);
2001 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
2003 struct hns3_adapter *hns = eth_dev->data->dev_private;
2004 struct hns3_hw *hw = &hns->hw;
2007 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2010 if (hw->adapter_state == HNS3_NIC_STARTED)
2011 ret = hns3vf_dev_stop(eth_dev);
2013 hw->adapter_state = HNS3_NIC_CLOSING;
2014 hns3_reset_abort(hns);
2015 hw->adapter_state = HNS3_NIC_CLOSED;
2016 rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
2017 hns3_configure_all_mc_mac_addr(hns, true);
2018 hns3vf_remove_all_vlan_table(hns);
2019 hns3vf_uninit_vf(eth_dev);
2020 hns3_free_all_queues(eth_dev);
2021 rte_free(hw->reset.wait_data);
2022 hns3_mp_uninit_primary();
2023 hns3_warn(hw, "Close port %u finished", hw->data->port_id);
2029 hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2032 struct hns3_adapter *hns = eth_dev->data->dev_private;
2033 struct hns3_hw *hw = &hns->hw;
2034 uint32_t version = hw->fw_version;
2037 ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
2038 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
2039 HNS3_FW_VERSION_BYTE3_S),
2040 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
2041 HNS3_FW_VERSION_BYTE2_S),
2042 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
2043 HNS3_FW_VERSION_BYTE1_S),
2044 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
2045 HNS3_FW_VERSION_BYTE0_S));
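/*
 * snprintf() returns the length the formatted string would have had,
 * excluding the terminating '\0'; ret is therefore incremented below
 * so that the fw_size check accounts for the '\0' as well.
 */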
2049 ret += 1; /* add the size of '\0' */
2050 if (fw_size < (size_t)ret)
2057 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
2058 __rte_unused int wait_to_complete)
2060 struct hns3_adapter *hns = eth_dev->data->dev_private;
2061 struct hns3_hw *hw = &hns->hw;
2062 struct hns3_mac *mac = &hw->mac;
2063 struct rte_eth_link new_link;
2065 memset(&new_link, 0, sizeof(new_link));
2066 switch (mac->link_speed) {
2067 case RTE_ETH_SPEED_NUM_10M:
2068 case RTE_ETH_SPEED_NUM_100M:
2069 case RTE_ETH_SPEED_NUM_1G:
2070 case RTE_ETH_SPEED_NUM_10G:
2071 case RTE_ETH_SPEED_NUM_25G:
2072 case RTE_ETH_SPEED_NUM_40G:
2073 case RTE_ETH_SPEED_NUM_50G:
2074 case RTE_ETH_SPEED_NUM_100G:
2075 case RTE_ETH_SPEED_NUM_200G:
2076 if (mac->link_status)
2077 new_link.link_speed = mac->link_speed;
2080 if (mac->link_status)
2081 new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
2085 if (!mac->link_status)
2086 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2088 new_link.link_duplex = mac->link_duplex;
2089 new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
2090 new_link.link_autoneg =
2091 !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
2093 return rte_eth_linkstatus_set(eth_dev, &new_link);
2097 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
2099 struct hns3_hw *hw = &hns->hw;
2100 uint16_t nb_rx_q = hw->data->nb_rx_queues;
2101 uint16_t nb_tx_q = hw->data->nb_tx_queues;
2104 ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
2108 hns3_enable_rxd_adv_layout(hw);
2110 ret = hns3_init_queues(hns, reset_queue);
2112 hns3_err(hw, "failed to init queues, ret = %d.", ret);
2118 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
2120 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2121 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2122 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2123 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
2124 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
2125 uint32_t intr_vector;
2130 * hns3 needs a separate interrupt to be used as the event interrupt,
2131 * which cannot be shared with the task queue pairs, so the kernel
2132 * driver needs to support multiple interrupt vectors.
2134 if (dev->data->dev_conf.intr_conf.rxq == 0 ||
2135 !rte_intr_cap_multiple(intr_handle))
2138 rte_intr_disable(intr_handle);
2139 intr_vector = hw->used_rx_queues;
2140 /* It creates event fd for each intr vector when MSIX is used */
2141 if (rte_intr_efd_enable(intr_handle, intr_vector))
2144 /* Allocate vector list */
2145 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
2146 hw->used_rx_queues)) {
2147 hns3_err(hw, "Failed to allocate %u rx_queues"
2148 " intr_vec", hw->used_rx_queues);
2150 goto vf_alloc_intr_vec_error;
2153 if (rte_intr_allow_others(intr_handle)) {
2154 vec = RTE_INTR_VEC_RXTX_OFFSET;
2155 base = RTE_INTR_VEC_RXTX_OFFSET;
2158 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2159 ret = hns3vf_bind_ring_with_vector(hw, vec, true,
2160 HNS3_RING_TYPE_RX, q_id);
2162 goto vf_bind_vector_error;
2164 if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
2165 goto vf_bind_vector_error;
2168 * If there are not enough efds (e.g. not enough interrupts),
2169 * the remaining queues will be bound to the last interrupt.
2171 if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
2174 rte_intr_enable(intr_handle);
2177 vf_bind_vector_error:
2178 rte_intr_vec_list_free(intr_handle);
2179 vf_alloc_intr_vec_error:
2180 rte_intr_efd_disable(intr_handle);
2185 hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
2187 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
2188 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2189 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2193 if (dev->data->dev_conf.intr_conf.rxq == 0)
2196 if (rte_intr_dp_is_en(intr_handle)) {
2197 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2198 ret = hns3vf_bind_ring_with_vector(hw,
2199 rte_intr_vec_list_index_get(intr_handle,
2201 true, HNS3_RING_TYPE_RX, q_id);
2211 hns3vf_restore_filter(struct rte_eth_dev *dev)
2213 hns3_restore_rss_filter(dev);
2217 hns3vf_dev_start(struct rte_eth_dev *dev)
2219 struct hns3_adapter *hns = dev->data->dev_private;
2220 struct hns3_hw *hw = &hns->hw;
2223 PMD_INIT_FUNC_TRACE();
2224 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
2227 rte_spinlock_lock(&hw->lock);
2228 hw->adapter_state = HNS3_NIC_STARTING;
2229 ret = hns3vf_do_start(hns, true);
2231 hw->adapter_state = HNS3_NIC_CONFIGURED;
2232 rte_spinlock_unlock(&hw->lock);
2235 ret = hns3vf_map_rx_interrupt(dev);
2237 goto map_rx_inter_err;
2240 * There are three registers used to control the status of a TQP
2241 * (which contains a pair of Tx and Rx queues) in the new version of the
2242 * network engine. One controls the enabling of the Tx queue, another
2243 * controls the enabling of the Rx queue, and the last is the master
2244 * switch controlling the enabling of the TQP. The Tx register and the
2245 * TQP register must both be enabled to enable a Tx queue. The same
2246 * applies to the Rx queue. For the older network engine, this function
2247 * only refreshes the enabled flag, and is used to update the status of
2248 * the queue in the DPDK framework.
2250 ret = hns3_start_all_txqs(dev);
2252 goto map_rx_inter_err;
2254 ret = hns3_start_all_rxqs(dev);
2256 goto start_all_rxqs_fail;
2258 hw->adapter_state = HNS3_NIC_STARTED;
2259 rte_spinlock_unlock(&hw->lock);
2261 hns3_rx_scattered_calc(dev);
2262 hns3_set_rxtx_function(dev);
2263 hns3_mp_req_start_rxtx(dev);
2265 hns3vf_restore_filter(dev);
2267 /* Enable interrupt of all rx queues before enabling queues */
2268 hns3_dev_all_rx_queue_intr_enable(hw, true);
2269 hns3_start_tqps(hw);
2271 if (dev->data->dev_conf.intr_conf.lsc != 0)
2272 hns3vf_dev_link_update(dev, 0);
2273 hns3vf_start_poll_job(dev);
2277 start_all_rxqs_fail:
2278 hns3_stop_all_txqs(dev);
2280 (void)hns3vf_do_stop(hns);
2281 hw->adapter_state = HNS3_NIC_CONFIGURED;
2282 rte_spinlock_unlock(&hw->lock);
2288 is_vf_reset_done(struct hns3_hw *hw)
2290 #define HNS3_FUN_RST_ING_BITS \
2291 (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
2292 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
2293 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
2294 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
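/*
 * A VF-level reset is tracked in HNS3_VF_RST_ING, while the wider
 * resets (global/core/IMP/function) are tracked in HNS3_FUN_RST_ING;
 * the reset is considered done once the in-progress bit clears.
 */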
2298 if (hw->reset.level == HNS3_VF_RESET) {
2299 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
2300 if (val & HNS3_VF_RST_ING_BIT)
2303 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
2304 if (val & HNS3_FUN_RST_ING_BITS)
bool
hns3vf_is_reset_pending(struct hns3_adapter *hns)
{
    struct hns3_hw *hw = &hns->hw;
    enum hns3_reset_level reset;

    /*
     * According to the PCIe protocol, an FLR to a PF device resets the PF
     * state as well as the SR-IOV extended capability, including VF Enable,
     * which means that the VFs no longer exist.
     *
     * HNS3_VF_FULL_RESET means the PF device is in FLR reset. While the PF
     * device is in the FLR stage, the register state of the VF device is
     * not reliable, so register state detection cannot be carried out. In
     * this case, we just ignore the register states and return false to
     * indicate that there are no other reset states that need to be
     * processed by the driver.
     */
    if (hw->reset.level == HNS3_VF_FULL_RESET)
        return false;

    /* Check the registers to confirm whether there is a reset pending. */
    hns3vf_check_event_cause(hns, NULL);
    reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
    if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
        hw->reset.level < reset) {
        hns3_warn(hw, "High level reset %d is pending", reset);
        return true;
    }
    return false;
}
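
/*
 * One step of the reset state machine: arm an alarm that polls
 * is_vf_reset_done() and interpret the result recorded by the alarm
 * callback. Returns 0 when the hardware is ready, -EAGAIN while still
 * waiting, and -ETIME when the hardware did not become ready in time.
 */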
static int
hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
{
    struct hns3_hw *hw = &hns->hw;
    struct hns3_wait_data *wait_data = hw->reset.wait_data;
    struct timeval tv;

    if (wait_data->result == HNS3_WAIT_SUCCESS) {
        /*
         * After the VF reset is ready, the PF may not have completed
         * the reset processing. Sending a mailbox message to the PF may
         * fail during the PF reset, so it is better to add an extra delay.
         */
        if (hw->reset.level == HNS3_VF_FUNC_RESET ||
            hw->reset.level == HNS3_FLR_RESET)
            return 0;
        /* Reset retry process, no need to add extra delay. */
        if (hw->reset.attempts)
            return 0;
        if (wait_data->check_completion == NULL)
            return 0;

        wait_data->check_completion = NULL;
        wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
        wait_data->count = 1;
        wait_data->result = HNS3_WAIT_REQUEST;
        rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
                          wait_data);
        hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
        return -EAGAIN;
    } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
        hns3_clock_gettime(&tv);
        hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
                  tv.tv_sec, tv.tv_usec);
        return -ETIME;
    } else if (wait_data->result == HNS3_WAIT_REQUEST)
        return -EAGAIN;

    wait_data->hns = hns;
    wait_data->check_completion = is_vf_reset_done;
    wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
                        HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
    wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
    wait_data->count = HNS3VF_RESET_WAIT_CNT;
    wait_data->result = HNS3_WAIT_REQUEST;
    rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
    return -EAGAIN;
}
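
/*
 * Ask the PF to start a VF function reset through the mailbox, then mark
 * the command queue disabled: once the reset is under way, commands must
 * not be issued until the queue is reinitialized.
 */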
static int
hns3vf_prepare_reset(struct hns3_adapter *hns)
{
    struct hns3_hw *hw = &hns->hw;
    int ret;

    if (hw->reset.level == HNS3_VF_FUNC_RESET) {
        ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
                                0, true, NULL, 0);
        if (ret)
            return ret;
    }
    __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
    return 0;
}
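
/*
 * Quiesce the port before a reset: stop the polling job, mark the link
 * down, disable the datapath in all processes, and remove the multicast
 * MAC table entries that the firmware cannot selectively clean up itself.
 */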
static int
hns3vf_stop_service(struct hns3_adapter *hns)
{
    struct hns3_hw *hw = &hns->hw;
    struct rte_eth_dev *eth_dev;

    eth_dev = &rte_eth_devices[hw->data->port_id];
    if (hw->adapter_state == HNS3_NIC_STARTED) {
        /*
         * Make sure to update the link status before stopping the
         * polling job, because updating the link status depends on the
         * polling job still existing.
         */
        hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
                                  hw->mac.link_duplex);
        hns3vf_stop_poll_job(eth_dev);
    }
    hw->mac.link_status = RTE_ETH_LINK_DOWN;

    hns3_set_rxtx_function(eth_dev);
    rte_wmb();
    /* Disable datapath on secondary process. */
    hns3_mp_req_stop_rxtx(eth_dev);
    rte_delay_ms(hw->cfg_max_queues);

    rte_spinlock_lock(&hw->lock);
    if (hw->adapter_state == HNS3_NIC_STARTED ||
        hw->adapter_state == HNS3_NIC_STOPPING) {
        hns3_enable_all_queues(hw, false);
        hns3vf_do_stop(hns);
        hw->reset.mbuf_deferred_free = true;
    } else {
        hw->reset.mbuf_deferred_free = false;
    }

    /*
     * It is cumbersome for hardware to pick-and-choose entries for deletion
     * from the table space. Hence, for a function reset, software
     * intervention is required to delete the entries.
     */
    if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
        hns3_configure_all_mc_mac_addr(hns, true);
    rte_spinlock_unlock(&hw->lock);

    return 0;
}
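
/*
 * Counterpart of hns3vf_stop_service(): re-enable the datapath in all
 * processes after a reset and, if the port was started, restart the
 * polling job and re-enable the queues.
 */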
static int
hns3vf_start_service(struct hns3_adapter *hns)
{
    struct hns3_hw *hw = &hns->hw;
    struct rte_eth_dev *eth_dev;

    eth_dev = &rte_eth_devices[hw->data->port_id];
    hns3_set_rxtx_function(eth_dev);
    hns3_mp_req_start_rxtx(eth_dev);
    if (hw->adapter_state == HNS3_NIC_STARTED) {
        hns3vf_start_poll_job(eth_dev);

        /* Enable interrupt of all rx queues before enabling queues */
        hns3_dev_all_rx_queue_intr_enable(hw, true);
        /*
         * The enable state of each rxq and txq will be recovered after
         * reset, so we need to restore it before enabling all tqps.
         */
        hns3_restore_tqp_enable_state(hw);
        /*
         * When the initialization is finished, enable the queues to
         * receive and transmit packets.
         */
        hns3_enable_all_queues(hw, true);
    }

    return 0;
}
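
/*
 * Detect whether the host PF kernel driver changed the default MAC address
 * while this VF was resetting, and if so, adopt the new address.
 */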
static int
hns3vf_check_default_mac_change(struct hns3_hw *hw)
{
    char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
    struct rte_ether_addr *hw_mac;
    int ret;

    /*
     * The hns3 PF kernel ethdev driver supports setting the VF MAC address
     * on the host with the "ip link set ..." command. If the PF kernel
     * driver sets the MAC address for a VF device after the related VF
     * device has been initialized, the PF driver notifies the VF driver to
     * reset the VF device so that the new MAC address takes effect
     * immediately. The hns3 VF PMD should therefore check whether the MAC
     * address has been changed by the PF kernel driver; if it has, the VF
     * driver must configure the hardware with the new MAC address during
     * the hardware-recovery stage of the reset process.
     */
    ret = hns3vf_get_host_mac_addr(hw);
    if (ret)
        return ret;

    hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
    ret = rte_is_zero_ether_addr(hw_mac);
    if (ret) {
        rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
    } else {
        ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
        if (!ret) {
            rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
            hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                   &hw->data->mac_addrs[0]);
            hns3_warn(hw, "Default MAC address has been changed to:"
                      " %s by the host PF kernel ethdev driver",
                      mac_str);
        }
    }

    return 0;
}
static int
hns3vf_restore_conf(struct hns3_adapter *hns)
{
    struct hns3_hw *hw = &hns->hw;
    int ret;

    ret = hns3vf_check_default_mac_change(hw);
    if (ret)
        return ret;

    ret = hns3_configure_all_mac_addr(hns, false);
    if (ret)
        return ret;

    ret = hns3_configure_all_mc_mac_addr(hns, false);
    if (ret)
        goto err_mc_mac;

    ret = hns3vf_restore_promisc(hns);
    if (ret)
        goto err_vlan_table;

    ret = hns3vf_restore_vlan_conf(hns);
    if (ret)
        goto err_vlan_table;

    ret = hns3vf_get_port_base_vlan_filter_state(hw);
    if (ret)
        goto err_vlan_table;

    ret = hns3vf_restore_rx_interrupt(hw);
    if (ret)
        goto err_vlan_table;

    ret = hns3_restore_gro_conf(hw);
    if (ret)
        goto err_vlan_table;

    if (hw->adapter_state == HNS3_NIC_STARTED) {
        ret = hns3vf_do_start(hns, false);
        if (ret)
            goto err_vlan_table;
        hns3_info(hw, "hns3vf dev restart successful!");
    } else if (hw->adapter_state == HNS3_NIC_STOPPING)
        hw->adapter_state = HNS3_NIC_CONFIGURED;

    ret = hns3vf_set_alive(hw, true);
    if (ret) {
        hns3_err(hw, "VF failed to send alive to PF: %d", ret);
        goto err_vlan_table;
    }

    return 0;

err_vlan_table:
    hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
    hns3_configure_all_mac_addr(hns, true);
    return ret;
}
static enum hns3_reset_level
hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
{
    enum hns3_reset_level reset_level;

    /* Return the highest-priority reset level amongst all. */
    if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
        reset_level = HNS3_VF_RESET;
    else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
        reset_level = HNS3_VF_FULL_RESET;
    else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
        reset_level = HNS3_VF_PF_FUNC_RESET;
    else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
        reset_level = HNS3_VF_FUNC_RESET;
    else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
        reset_level = HNS3_FLR_RESET;
    else
        reset_level = HNS3_NONE_RESET;

    if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
        return HNS3_NONE_RESET;

    return reset_level;
}
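
/*
 * Entry point of the reset recovery task scheduled by the reset framework.
 * It first recovers a possibly lost interrupt for a deferred request, then
 * drives hns3_reset_process() for the highest pending reset level and
 * complains if the whole recovery took longer than HNS3_RESET_PROCESS_MS.
 */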
static void
hns3vf_reset_service(void *param)
{
    struct hns3_adapter *hns = (struct hns3_adapter *)param;
    struct hns3_hw *hw = &hns->hw;
    enum hns3_reset_level reset_level;
    struct timeval tv_delta;
    struct timeval tv_start;
    struct timeval tv;
    uint64_t msec;

    /*
     * If the interrupt was not triggered within the delay time, it may
     * have been lost. It is necessary to handle the interrupt here to
     * recover from the error.
     */
    if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
        SCHEDULE_DEFERRED) {
        __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
                         __ATOMIC_RELAXED);
        hns3_err(hw, "Handling interrupts in delayed tasks");
        hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
        reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
        if (reset_level == HNS3_NONE_RESET) {
            hns3_err(hw, "No reset level is set, try global reset");
            hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
        }
    }
    __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

    /*
     * Hardware reset has been notified, we now have to poll & check if
     * the hardware has actually completed the reset sequence.
     */
    reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
    if (reset_level != HNS3_NONE_RESET) {
        hns3_clock_gettime(&tv_start);
        hns3_reset_process(hns, reset_level);
        hns3_clock_gettime(&tv);
        timersub(&tv, &tv_start, &tv_delta);
        msec = hns3_clock_calctime_ms(&tv_delta);
        if (msec > HNS3_RESET_PROCESS_MS)
            hns3_err(hw, "%d handle long time delta %" PRIu64
                     " ms time=%ld.%.6ld",
                     hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
    }
}
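
/*
 * Re-initialize the device once the hardware reset completes: restore PCI
 * bus mastering and MSI-X for a full VF reset, bring the command queue back
 * up, reset all TQPs and redo the basic hardware initialization.
 */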
static int
hns3vf_reinit_dev(struct hns3_adapter *hns)
{
    struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
    struct hns3_hw *hw = &hns->hw;
    int ret;

    if (hw->reset.level == HNS3_VF_FULL_RESET) {
        rte_intr_disable(pci_dev->intr_handle);
        ret = hns3vf_set_bus_master(pci_dev, true);
        if (ret < 0) {
            hns3_err(hw, "failed to set pci bus, ret = %d", ret);
            return ret;
        }
    }

    /* Firmware command initialize */
    ret = hns3_cmd_init(hw);
    if (ret) {
        hns3_err(hw, "Failed to init cmd: %d", ret);
        return ret;
    }

    if (hw->reset.level == HNS3_VF_FULL_RESET) {
        /*
         * UIO enables MSI-X by writing the PCIe configuration space,
         * while vfio_pci enables MSI-X in rte_intr_enable.
         */
        if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
            pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
            if (hns3vf_enable_msix(pci_dev, true))
                hns3_err(hw, "Failed to enable msix");
        }
        rte_intr_enable(pci_dev->intr_handle);
    }

    ret = hns3_reset_all_tqps(hns);
    if (ret) {
        hns3_err(hw, "Failed to reset all queues: %d", ret);
        return ret;
    }

    ret = hns3vf_init_hardware(hns);
    if (ret) {
        hns3_err(hw, "Failed to init hardware: %d", ret);
        return ret;
    }

    return 0;
}
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
    .dev_configure = hns3vf_dev_configure,
    .dev_start = hns3vf_dev_start,
    .dev_stop = hns3vf_dev_stop,
    .dev_close = hns3vf_dev_close,
    .mtu_set = hns3vf_dev_mtu_set,
    .promiscuous_enable = hns3vf_dev_promiscuous_enable,
    .promiscuous_disable = hns3vf_dev_promiscuous_disable,
    .allmulticast_enable = hns3vf_dev_allmulticast_enable,
    .allmulticast_disable = hns3vf_dev_allmulticast_disable,
    .stats_get = hns3_stats_get,
    .stats_reset = hns3_stats_reset,
    .xstats_get = hns3_dev_xstats_get,
    .xstats_get_names = hns3_dev_xstats_get_names,
    .xstats_reset = hns3_dev_xstats_reset,
    .xstats_get_by_id = hns3_dev_xstats_get_by_id,
    .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
    .dev_infos_get = hns3vf_dev_infos_get,
    .fw_version_get = hns3vf_fw_version_get,
    .rx_queue_setup = hns3_rx_queue_setup,
    .tx_queue_setup = hns3_tx_queue_setup,
    .rx_queue_release = hns3_dev_rx_queue_release,
    .tx_queue_release = hns3_dev_tx_queue_release,
    .rx_queue_start = hns3_dev_rx_queue_start,
    .rx_queue_stop = hns3_dev_rx_queue_stop,
    .tx_queue_start = hns3_dev_tx_queue_start,
    .tx_queue_stop = hns3_dev_tx_queue_stop,
    .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
    .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
    .rxq_info_get = hns3_rxq_info_get,
    .txq_info_get = hns3_txq_info_get,
    .rx_burst_mode_get = hns3_rx_burst_mode_get,
    .tx_burst_mode_get = hns3_tx_burst_mode_get,
    .mac_addr_add = hns3_add_mac_addr,
    .mac_addr_remove = hns3_remove_mac_addr,
    .mac_addr_set = hns3vf_set_default_mac_addr,
    .set_mc_addr_list = hns3vf_set_mc_mac_addr_list,
    .link_update = hns3vf_dev_link_update,
    .rss_hash_update = hns3_dev_rss_hash_update,
    .rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
    .reta_update = hns3_dev_rss_reta_update,
    .reta_query = hns3_dev_rss_reta_query,
    .flow_ops_get = hns3_dev_flow_ops_get,
    .vlan_filter_set = hns3vf_vlan_filter_set,
    .vlan_offload_set = hns3vf_vlan_offload_set,
    .get_reg = hns3_get_regs,
    .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
    .tx_done_cleanup = hns3_tx_done_cleanup,
};
static const struct hns3_reset_ops hns3vf_reset_ops = {
    .reset_service = hns3vf_reset_service,
    .stop_service = hns3vf_stop_service,
    .prepare_reset = hns3vf_prepare_reset,
    .wait_hardware_ready = hns3vf_wait_hardware_ready,
    .reinit_dev = hns3vf_reinit_dev,
    .restore_conf = hns3vf_restore_conf,
    .start_service = hns3vf_start_service,
};
static void
hns3vf_init_hw_ops(struct hns3_hw *hw)
{
    hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
    hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
    hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
    hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
}
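
/*
 * Main entry for device initialization. A secondary process only wires up
 * the datapath; the primary process initializes the VF, allocates the MAC
 * address table and arms the keep-alive alarm towards the PF.
 */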
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
    struct hns3_adapter *hns = eth_dev->data->dev_private;
    struct hns3_hw *hw = &hns->hw;
    int ret;

    PMD_INIT_FUNC_TRACE();

    hns3_flow_init(eth_dev);

    hns3_set_rxtx_function(eth_dev);
    eth_dev->dev_ops = &hns3vf_eth_dev_ops;
    eth_dev->rx_queue_count = hns3_rx_queue_count;
    if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
        ret = hns3_mp_init_secondary();
        if (ret) {
            PMD_INIT_LOG(ERR, "Failed to init for secondary "
                         "process, ret = %d", ret);
            goto err_mp_init_secondary;
        }
        hw->secondary_cnt++;
        hns3_tx_push_init(eth_dev);
        return 0;
    }

    ret = hns3_mp_init_primary();
    if (ret) {
        PMD_INIT_LOG(ERR,
                     "Failed to init for primary process, ret = %d",
                     ret);
        goto err_mp_init_primary;
    }

    hw->adapter_state = HNS3_NIC_UNINITIALIZED;
    hns->is_vf = true;
    hw->data = eth_dev->data;
    hns3_parse_devargs(eth_dev);

    ret = hns3_reset_init(hw);
    if (ret)
        goto err_init_reset;
    hw->reset.ops = &hns3vf_reset_ops;

    hns3vf_init_hw_ops(hw);
    ret = hns3vf_init_vf(eth_dev);
    if (ret) {
        PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
        goto err_init_vf;
    }

    /* Allocate memory for storing MAC addresses */
    eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
                                           sizeof(struct rte_ether_addr) *
                                           HNS3_VF_UC_MACADDR_NUM, 0);
    if (eth_dev->data->mac_addrs == NULL) {
        PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
                     "to store MAC addresses",
                     sizeof(struct rte_ether_addr) *
                     HNS3_VF_UC_MACADDR_NUM);
        ret = -ENOMEM;
        goto err_rte_zmalloc;
    }

    /*
     * The hns3 PF kernel ethdev driver supports setting the VF MAC address
     * on the host with the "ip link set ..." command. To avoid problems,
     * for example, the hns3 VF PMD failing to receive and send packets
     * after the user configures a MAC address with the "ip link set ..."
     * command, the hns3 VF PMD keeps the same MAC address strategy as the
     * hns3 kernel ethdev driver during initialization: if the user has
     * configured a MAC address for the VF device through the ip command,
     * the VF PMD starts with it; otherwise it starts with a random MAC
     * address.
     */
    if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr))
        rte_eth_random_addr(hw->mac.mac_addr);
    rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
                        &eth_dev->data->mac_addrs[0]);

    hw->adapter_state = HNS3_NIC_INITIALIZED;

    if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
        SCHEDULE_PENDING) {
        hns3_err(hw, "Reschedule reset service after dev_init");
        hns3_schedule_reset(hns);
    } else {
        /* IMP will wait ready flag before reset */
        hns3_notify_reset_ready(hw, false);
    }
    rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
                      eth_dev);
    return 0;

err_rte_zmalloc:
    hns3vf_uninit_vf(eth_dev);

err_init_vf:
    rte_free(hw->reset.wait_data);

err_init_reset:
    hns3_mp_uninit_primary();

err_mp_init_primary:
err_mp_init_secondary:
    eth_dev->dev_ops = NULL;
    eth_dev->rx_pkt_burst = NULL;
    eth_dev->rx_descriptor_status = NULL;
    eth_dev->tx_pkt_burst = NULL;
    eth_dev->tx_pkt_prepare = NULL;
    eth_dev->tx_descriptor_status = NULL;

    return ret;
}
static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
    struct hns3_adapter *hns = eth_dev->data->dev_private;
    struct hns3_hw *hw = &hns->hw;

    PMD_INIT_FUNC_TRACE();

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    if (hw->adapter_state < HNS3_NIC_CLOSING)
        hns3vf_dev_close(eth_dev);

    hw->adapter_state = HNS3_NIC_REMOVED;
    return 0;
}
static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                     struct rte_pci_device *pci_dev)
{
    return rte_eth_dev_pci_generic_probe(pci_dev,
                                         sizeof(struct hns3_adapter),
                                         hns3vf_dev_init);
}

static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
    return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}
static const struct rte_pci_id pci_id_hns3vf_map[] = {
    { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
    { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
    { .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3vf_pmd = {
    .id_table = pci_id_hns3vf_map,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
    .probe = eth_hns3vf_pci_probe,
    .remove = eth_hns3vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
                HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
                HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");