/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <arpa/inet.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
/* Needed for hns3_tc_queue_mapping_cfg() and the hns3_mp_req_*() calls below */
#include "hns3_dcb.h"
#include "hns3_mp.h"

#define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
#define HNS3VF_SERVICE_INTERVAL		1000000 /* us */

#define HNS3VF_RESET_WAIT_MS	20
#define HNS3VF_RESET_WAIT_CNT	2000

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

enum hns3vf_evt_cause {
	HNS3VF_VECTOR0_EVENT_RST,
	HNS3VF_VECTOR0_EVENT_MBX,
	HNS3VF_VECTOR0_EVENT_OTHER,
};

static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
						    uint64_t *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);

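/*
 * The VF has no direct access to the unicast MAC table: every add,
 * remove or modify below is forwarded to the PF through the VF-to-PF
 * mailbox (hns3_send_mbx_msg()) and serialized under hw->lock.
 */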
static int
hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		    __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
				HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
				RTE_ETHER_ADDR_LEN, false, NULL, 0);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
			 ret);
	}

	return ret;
}

static void
hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
				HNS3_MBX_MAC_VLAN_UC_REMOVE,
				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
				NULL, 0);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
			 mac_str, ret);
	}
}

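/*
 * Changing the default MAC address sends both the new and the old
 * address in a single HNS3_MBX_MAC_VLAN_UC_MODIFY message, letting the
 * PF overwrite the existing table entry instead of doing a separate
 * remove and add.
 */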
static int
hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr)
{
#define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *old_addr;
	uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
			 mac_str);
		return -EINVAL;
	}

	old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	rte_spinlock_lock(&hw->lock);
	memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
	       RTE_ETHER_ADDR_LEN);

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
				HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
				HNS3_TWO_ETHER_ADDR_LEN, false, NULL, 0);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str,
			 ret);
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	enum hns3_mbx_mac_vlan_subcode opcode;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret = 0;
	int i;

	if (del)
		opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
	else
		opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
	for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
		addr = &hw->data->mac_addrs[i];
		if (!rte_is_valid_assigned_ether_addr(addr))
			continue;
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
		hns3_dbg(hw, "rm mac addr: %s", mac_str);
		ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
					addr->addr_bytes, RTE_ETHER_ADDR_LEN,
					false, NULL, 0);
		if (ret) {
			hns3_err(hw, "Failed to remove mac addr for vf: %d",
				 ret);
			break;
		}
	}
	return ret;
}

static int
hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
		       struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
				HNS3_MBX_MAC_VLAN_MC_ADD,
				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
				NULL, 0);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
			 mac_str, ret);
	}

	return ret;
}

static int
hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
			  struct rte_ether_addr *mac_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
				HNS3_MBX_MAC_VLAN_MC_REMOVE,
				mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
				NULL, 0);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
			 mac_str, ret);
	}

	return ret;
}

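/*
 * The multicast list is replaced wholesale: the current entries are
 * removed one by one (newest first) and the new list is then added
 * entry by entry, with hw->mc_addrs/mc_addrs_num mirroring the
 * hardware state at every step.
 */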
static int
hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mc_addr_set,
			    uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -EINVAL;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw,
				 "Failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}
	}
	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hns3vf_remove_mc_mac_addr(hns, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hns3vf_add_mc_mac_addr(hns, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int err = 0;
	int ret;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hns3vf_remove_mc_mac_addr(hns, addr);
		else
			ret = hns3vf_add_mc_mac_addr(hns, addr);
		if (ret) {
			err = ret;
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
				 del ? "Remove" : "Restore", mac_str, ret);
		}
	}
	return err;
}

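/*
 * Promiscuous configuration goes through a raw HNS3_OPC_MBX_VF_TO_PF
 * command descriptor instead of the hns3_send_mbx_msg() helper; only
 * broadcast promiscuity (en_bc_pmc) is controlled here.
 */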
static int
hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_bc_pmc ? 1 : 0;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Set promisc mode fail, status is %d", ret);

	return ret;
}

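/*
 * Note on configuration constraints: hip08 requires the Rx and Tx queue
 * counts to be equal. When RSS is requested without a key, the driver
 * default key is used; with jumbo frames enabled, the MTU is re-derived
 * from max_rx_pkt_len so the two stay consistent.
 */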
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	uint16_t mtu;
	int ret;

	/*
	 * On hip08, hardware does not support configurations where the
	 * number of Rx and Tx queues differ.
	 */
	if (nb_rx_q != nb_tx_q) {
		hns3_err(hw,
			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
			 "Hardware does not support this configuration!",
			 nb_rx_q, nb_tx_q);
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		hns3_err(hw, "setting link speed/duplex not supported");
		return -EINVAL;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;

	/*
	 * If RSS is requested, apply the application's RSS configuration,
	 * falling back to the driver default key when none is supplied.
	 */
	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
		rss_conf = conf->rx_adv_conf.rss_conf;
		if (rss_conf.rss_key == NULL) {
			rss_conf.rss_key = rss_cfg->key;
			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
		}

		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	/*
	 * If jumbo frames are enabled, the MTU needs to be refreshed
	 * according to the maximum Rx packet length.
	 */
	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * max_rx_pkt_len is validated by the DPDK framework and is
		 * at most HNS3_MAX_FRAME_LEN, so it can safely be assigned
		 * to a uint16_t variable.
		 */
		mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
		ret = hns3vf_dev_mtu_set(dev, mtu);
		if (ret)
			goto cfg_err;
		dev->data->mtu = mtu;
	}

	ret = hns3vf_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

cfg_err:
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	return ret;
}

static int
hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
{
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
				sizeof(mtu), true, NULL, 0);
	if (ret)
		hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);

	return ret;
}

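/*
 * The MTU is owned by the PF, so hns3vf_config_mtu() above is a
 * synchronous mailbox call; the wrapper below additionally refuses
 * changes while the port is running or a reset is in flight, and keeps
 * the jumbo-frame offload flag and max_rx_pkt_len in sync.
 */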
static int
hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
	int ret;

	if (dev->data->dev_started) {
		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
			 "before configuration", dev->data->port_id);
		return -EBUSY;
	}

	if (rte_atomic16_read(&hw->reset.resetting)) {
		hns3_err(hw, "Failed to set mtu during resetting");
		return -EIO;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3vf_config_mtu(hw, mtu);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	info->max_rx_queues = hw->tqps_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = hw->rx_buf_len;
	info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;

	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				 DEV_RX_OFFLOAD_UDP_CKSUM |
				 DEV_RX_OFFLOAD_TCP_CKSUM |
				 DEV_RX_OFFLOAD_SCTP_CKSUM |
				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 DEV_RX_OFFLOAD_KEEP_CRC |
				 DEV_RX_OFFLOAD_SCATTER |
				 DEV_RX_OFFLOAD_VLAN_STRIP |
				 DEV_RX_OFFLOAD_QINQ_STRIP |
				 DEV_RX_OFFLOAD_VLAN_FILTER |
				 DEV_RX_OFFLOAD_JUMBO_FRAME);
	info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 DEV_TX_OFFLOAD_IPV4_CKSUM |
				 DEV_TX_OFFLOAD_TCP_CKSUM |
				 DEV_TX_OFFLOAD_UDP_CKSUM |
				 DEV_TX_OFFLOAD_SCTP_CKSUM |
				 DEV_TX_OFFLOAD_VLAN_INSERT |
				 DEV_TX_OFFLOAD_QINQ_INSERT |
				 DEV_TX_OFFLOAD_MULTI_SEGS |
				 info->tx_queue_offload_capa);

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->vmdq_queue_num = 0;

	info->reta_size = HNS3_RSS_IND_TBL_SIZE;
	info->hash_key_size = HNS3_RSS_KEY_SIZE;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	return 0;
}

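/*
 * Misc (vector0) interrupt helpers: the same vector carries both reset
 * notifications and mailbox (CMDQ RX) events, so the handler masks the
 * vector, decodes the cause from HNS3_VECTOR0_CMDQ_STAT_REG, acks it in
 * HNS3_VECTOR0_CMDQ_SRC_REG and unmasks the vector again.
 */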
static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}

static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}

static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}

static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3vf_evt_cause ret;
	uint32_t cmdq_stat_reg;
	uint32_t rst_ing_reg;
	uint32_t val;

	/* Fetch the events from their corresponding regs */
	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
		rte_atomic16_set(&hw->reset.disable_cmd, 1);
		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
		if (clearval) {
			hw->reset.stats.global_cnt++;
			hns3_warn(hw, "Global reset detected, clear reset status");
		} else {
			hns3_schedule_delayed_reset(hns);
			hns3_warn(hw, "Global reset detected, don't clear reset status");
		}

		ret = HNS3VF_VECTOR0_EVENT_RST;
		goto out;
	}

	/* Check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		ret = HNS3VF_VECTOR0_EVENT_MBX;
		goto out;
	}

	val = 0;
	ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
	if (clearval)
		*clearval = val;

	return ret;
}

static void
hns3vf_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3vf_evt_cause event_cause;
	uint32_t clearval;

	if (hw->irq_thread_id == 0)
		hw->irq_thread_id = pthread_self();

	/* Disable interrupt */
	hns3vf_disable_irq0(hw);

	/* Read out interrupt causes */
	event_cause = hns3vf_check_event_cause(hns, &clearval);

	switch (event_cause) {
	case HNS3VF_VECTOR0_EVENT_RST:
		hns3_schedule_reset(hns);
		break;
	case HNS3VF_VECTOR0_EVENT_MBX:
		hns3_dev_handle_mbx_msg(hw);
		break;
	default:
		break;
	}

	/* Clear interrupt causes */
	hns3vf_clear_event_cause(hw, clearval);

	/* Enable interrupt */
	hns3vf_enable_irq0(hw);
}

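/*
 * The helpers below pull the VF's static configuration from the PF at
 * init time via synchronous mailbox queries: TQP count/RSS size/buffer
 * length (GET_QINFO), ring depths (GET_QDEPTH) and the TC map
 * (GET_TCINFO).
 */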
static int
hns3vf_check_tqp_info(struct hns3_hw *hw)
{
	uint16_t tqps_num;

	tqps_num = hw->tqps_num;
	if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
		PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
			     "range: 1~%d",
			     tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
		return -EINVAL;
	}

	if (hw->rx_buf_len == 0)
		hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);

	return 0;
}

static int
hns3vf_get_queue_info(struct hns3_hw *hw)
{
#define HNS3VF_TQPS_RSS_INFO_LEN	6
	uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
				resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
		return ret;
	}

	memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
	memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
	memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));

	return hns3vf_check_tqp_info(hw);
}

static int
hns3vf_get_queue_depth(struct hns3_hw *hw)
{
#define HNS3VF_TQPS_DEPTH_INFO_LEN	4
	uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
				resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
			     ret);
		return ret;
	}

	memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
	memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));

	return 0;
}

static int
hns3vf_get_tc_info(struct hns3_hw *hw)
{
	uint8_t resp_msg;
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
				true, &resp_msg, sizeof(resp_msg));
	if (ret) {
		hns3_err(hw, "VF request to get TC info from PF failed %d",
			 ret);
		return ret;
	}

	hw->hw_tc_map = resp_msg;

	return 0;
}

static int
hns3vf_get_configuration(struct hns3_hw *hw)
{
	int ret;

	hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;

	/* Get queue configuration from PF */
	ret = hns3vf_get_queue_info(hw);
	if (ret)
		return ret;

	/* Get queue depth info from PF */
	ret = hns3vf_get_queue_depth(hw);
	if (ret)
		return ret;

	/* Get tc configuration from PF */
	return hns3vf_get_tc_info(hw);
}

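/*
 * Queue resources are carved out of the PF-granted TQPs: alloc_rss_size
 * is the per-TC queue count, capped by rss_size_max, and alloc_tqps is
 * that count multiplied by the number of TCs set in hw_tc_map.
 */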
static void
hns3vf_set_tc_info(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t new_tqps;
	uint8_t i;

	hw->num_tc = 0;
	for (i = 0; i < HNS3_MAX_TC_NUM; i++)
		if (hw->hw_tc_map & BIT(i))
			hw->num_tc++;

	new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
	hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;

	hns3_tc_queue_mapping_cfg(hw);
}

static void
hns3vf_request_link_info(struct hns3_hw *hw)
{
	uint8_t resp_msg;
	int ret;

	if (rte_atomic16_read(&hw->reset.resetting))
		return;
	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
				&resp_msg, sizeof(resp_msg));
	if (ret)
		hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
}

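/*
 * VLAN filter updates are encoded into a 5-byte mailbox payload:
 * byte 0 is the kill flag (1 = remove), bytes 1-2 the VLAN id and
 * bytes 3-4 the protocol (TPID) in network byte order.
 */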
static int
hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3VF_VLAN_MBX_MSG_LEN 5
	struct hns3_hw *hw = &hns->hw;
	uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
	uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
	uint8_t is_kill = on ? 0 : 1;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));

	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
				 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
				 0);
}

static int
hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (rte_atomic16_read(&hw->reset.resetting)) {
		hns3_err(hw,
			 "vf set vlan id failed during resetting, vlan_id =%u",
			 vlan_id);
		return -EIO;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);
	if (ret)
		hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

	return ret;
}

static int
hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
{
	uint8_t msg_data;
	int ret;

	msg_data = enable ? 1 : 0;
	ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
				&msg_data, sizeof(msg_data), false, NULL, 0);
	if (ret)
		hns3_err(hw, "vf enable strip failed, ret =%d", ret);

	return ret;
}

static int
hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	unsigned int tmp_mask;

	tmp_mask = (unsigned int)mask;
	/* Vlan stripping setting */
	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
		rte_spinlock_lock(&hw->lock);
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			hns3vf_en_hw_strip_rxvtag(hw, true);
		else
			hns3vf_en_hw_strip_rxvtag(hw, false);
		rte_spinlock_unlock(&hw->lock);
	}

	return 0;
}

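/*
 * rte_vlan_filter_conf stores configured VLAN ids as a bitmap of 64-bit
 * words; the walk below isolates each set bit, converts its position
 * back into a VLAN id and replays it to the PF.
 */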
static int
hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
{
	struct rte_vlan_filter_conf *vfc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vlan_id;
	uint64_t vbit;
	uint64_t ids;
	int ret = 0;
	uint32_t i;

	vfc = &hw->data->vlan_filter_conf;
	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;
		ids = vfc->ids[i];
		while (ids) {
			/*
			 * 64 means the num bits of ids, one bit corresponds to
			 * one vlan id
			 */
			vlan_id = 64 * i;
			/* count trailing zeroes */
			vbit = ~ids & (ids - 1);
			/* clear least significant bit set */
			ids ^= (ids ^ (ids - 1)) ^ vbit;
			for (; vbit;) {
				vbit >>= 1;
				vlan_id++;
			}

			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
			if (ret) {
				hns3_err(hw,
					 "VF handle vlan table failed, ret =%d, on = %d",
					 ret, on);
				return ret;
			}
		}
	}

	return ret;
}

static int
hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
{
	return hns3vf_handle_all_vlan_table(hns, 0);
}

static int
hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_conf *dev_conf;
	bool en;
	int ret;

	dev_conf = &hw->data->dev_conf;
	en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
								   : false;
	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
	if (ret)
		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
			 ret);
	return ret;
}

static int
hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
	    data->dev_conf.txmode.hw_vlan_reject_untagged ||
	    data->dev_conf.txmode.hw_vlan_insert_pvid) {
		hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
			  "or hw_vlan_insert_pvid is not supported!");
	}

	/* Apply vlan offload setting */
	ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
	if (ret)
		hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);

	return ret;
}

static int
hns3vf_set_alive(struct hns3_hw *hw, bool alive)
{
	uint8_t msg_data;

	msg_data = alive ? 1 : 0;
	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
				 sizeof(msg_data), false, NULL, 0);
}

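/*
 * Two self-rearming alarms drive the VF's background work: the
 * keep-alive handler pings the PF every HNS3VF_KEEP_ALIVE_INTERVAL so
 * the PF keeps treating the VF as alive, and the service handler
 * refreshes the link status every HNS3VF_SERVICE_INTERVAL.
 */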
static void
hns3vf_keep_alive_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t respmsg;
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
				false, &respmsg, sizeof(uint8_t));
	if (ret)
		hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
			 ret);

	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
}

static void
hns3vf_service_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	/*
	 * The link status query and reset processing are executed in the
	 * interrupt thread. When an IMP reset occurs, IMP will not respond
	 * and the query operation will time out after 30ms. With multiple
	 * PF/VFs, each query failure timeout prevents the IMP reset
	 * interrupt from being serviced within 100ms.
	 * Before querying the link status, check whether there is a reset
	 * pending, and if so, abandon the query.
	 */
	if (!hns3vf_is_reset_pending(hns))
		hns3vf_request_link_info(hw);
	else
		hns3_warn(hw, "Cancel the query when reset is pending");

	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
			  eth_dev);
}

static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t mtu = hw->data->mtu;
	int ret;

	ret = hns3vf_set_promisc_mode(hw, true);
	if (ret)
		return ret;

	ret = hns3vf_config_mtu(hw, mtu);
	if (ret)
		goto err_init_hardware;

	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
		goto err_init_hardware;
	}

	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_init_hardware;
	}

	ret = hns3vf_set_alive(hw, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
		goto err_init_hardware;
	}

	hns3vf_request_link_info(hw);

	return 0;

err_init_hardware:
	(void)hns3vf_set_promisc_mode(hw, false);
	return ret;
}

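/*
 * VF bring-up order matters: map BAR2, initialize the firmware command
 * queue and channel, register the misc interrupt, fetch the PF-provided
 * configuration and only then program the hardware defaults. The error
 * labels unwind these steps in reverse order.
 */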
static int
hns3vf_init_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	rte_spinlock_init(&hw->mbx_resp.lock);

	hns3vf_clear_event_cause(hw, 0);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 hns3vf_interrupt_handler, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	/* Enable interrupt */
	rte_intr_enable(&pci_dev->intr_handle);
	hns3vf_enable_irq0(hw);

	/* Get configuration from PF */
	ret = hns3vf_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	rte_eth_random_addr(hw->mac.mac_addr); /* Generate a random mac addr */

	ret = hns3vf_init_hardware(hns);
	if (ret)
		goto err_get_config;

	hns3_set_default_rss_args(hw);

	(void)hns3_stats_reset(eth_dev);
	return 0;

err_get_config:
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
	hns3_cmd_uninit(hw);

err_cmd_init:
	hns3_cmd_destroy_queue(hw);

err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}

static void
hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_rss_uninit(hns);
	(void)hns3vf_set_alive(hw, false);
	(void)hns3vf_set_promisc_mode(hw, false);
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}

static int
hns3vf_do_stop(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	bool reset_queue;

	hw->mac.link_status = ETH_LINK_DOWN;

	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
		hns3vf_configure_mac_addr(hns, true);
		reset_queue = true;
	} else
		reset_queue = false;
	return hns3_stop_queues(hns, reset_queue);
}

static void
hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		hns3vf_do_stop(hns);
		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	rte_spinlock_unlock(&hw->lock);
}

static void
hns3vf_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	if (hw->adapter_state == HNS3_NIC_STARTED)
		hns3vf_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;
	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hns3vf_configure_all_mc_mac_addr(hns, true);
	hns3vf_remove_all_vlan_table(hns);
	hns3vf_uninit_vf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_mp_uninit_primary();
	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}

static int
hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
		       __rte_unused int wait_to_complete)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_mac *mac = &hw->mac;
	struct rte_eth_link new_link;

	hns3vf_request_link_info(hw);

	memset(&new_link, 0, sizeof(new_link));
	switch (mac->link_speed) {
	case ETH_SPEED_NUM_10M:
	case ETH_SPEED_NUM_100M:
	case ETH_SPEED_NUM_1G:
	case ETH_SPEED_NUM_10G:
	case ETH_SPEED_NUM_25G:
	case ETH_SPEED_NUM_40G:
	case ETH_SPEED_NUM_50G:
	case ETH_SPEED_NUM_100G:
		new_link.link_speed = mac->link_speed;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	}

	new_link.link_duplex = mac->link_duplex;
	new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	new_link.link_autoneg =
	    !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

static int
hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	hns3vf_set_tc_info(hns);

	ret = hns3_start_queues(hns, reset_queue);
	if (ret)
		hns3_err(hw, "Failed to start queues: %d", ret);

	return ret;
}

static int
hns3vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_atomic16_read(&hw->reset.resetting))
		return -EBUSY;
	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;
	ret = hns3vf_do_start(hns, true);
	if (ret) {
		hw->adapter_state = HNS3_NIC_CONFIGURED;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}
	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	return 0;
}

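/*
 * Reset support: is_vf_reset_done() polls HNS3_VF_RST_ING for a pure VF
 * reset and HNS3_FUN_RST_ING for the function-level resets, while the
 * prepare/stop/wait/reinit/restore/start helpers below implement the
 * hns3_reset_ops callbacks used by the common reset state machine.
 */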
static bool
is_vf_reset_done(struct hns3_hw *hw)
{
#define HNS3_FUN_RST_ING_BITS \
	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))

	uint32_t val;

	if (hw->reset.level == HNS3_VF_RESET) {
		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
		if (val & HNS3_VF_RST_ING_BIT)
			return false;
	} else {
		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
		if (val & HNS3_FUN_RST_ING_BITS)
			return false;
	}
	return true;
}

bool
hns3vf_is_reset_pending(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset;

	hns3vf_check_event_cause(hns, NULL);
	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is pending", reset);
		return true;
	}
	return false;
}

static int
hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS)
		return 0;
	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		gettimeofday(&tv, NULL);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	wait_data->hns = hns;
	wait_data->check_completion = is_vf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
			    HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3VF_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}

static int
hns3vf_prepare_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret = 0;

	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
		ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
					0, true, NULL, 0);
	}
	rte_atomic16_set(&hw->reset.disable_cmd, 1);

	return ret;
}

static int
hns3vf_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hw->mac.link_status = ETH_LINK_DOWN;

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (hw->adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3vf_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries.
	 */
	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
		hns3vf_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3vf_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);

	hns3vf_service_handler(eth_dev);

	return 0;
}

static int
hns3vf_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3vf_configure_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3vf_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3vf_restore_vlan_conf(hns);
	if (ret)
		goto err_vlan_table;

	if (hw->adapter_state == HNS3_NIC_STARTED) {
		ret = hns3vf_do_start(hns, false);
		if (ret)
			goto err_vlan_table;
		hns3_info(hw, "hns3vf dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_vlan_table:
	hns3vf_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3vf_configure_mac_addr(hns, true);
	return ret;
}

static enum hns3_reset_level
hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
{
	enum hns3_reset_level reset_level;

	/* return the highest priority reset level amongst all */
	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
		reset_level = HNS3_VF_RESET;
	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
		reset_level = HNS3_VF_FULL_RESET;
	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
		reset_level = HNS3_VF_PF_FUNC_RESET;
	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
		reset_level = HNS3_VF_FUNC_RESET;
	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
		reset_level = HNS3_FLR_RESET;
	else
		reset_level = HNS3_NONE_RESET;

	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
		return HNS3_NONE_RESET;

	return reset_level;
}

static void
hns3vf_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost; handle it here to recover from the error.
	 */
	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
	}
	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);

	/*
	 * Hardware reset has been notified, we now have to poll & check if
	 * hardware has actually completed the reset sequence.
	 */
	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		gettimeofday(&tv_start, NULL);
		hns3_reset_process(hns, reset_level);
		gettimeofday(&tv, NULL);
		timersub(&tv, &tv_start, &tv_delta);
		msec = tv_delta.tv_sec * MSEC_PER_SEC +
		       tv_delta.tv_usec / USEC_PER_MSEC;
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64
				 " ms time=%ld.%.6ld",
				 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
	}
}

static int
hns3vf_reinit_dev(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		hns3_err(hw, "Failed to init cmd: %d", ret);
		return ret;
	}

	ret = hns3_reset_all_queues(hns);
	if (ret) {
		hns3_err(hw, "Failed to reset all queues: %d", ret);
		goto err_init;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret) {
		hns3_err(hw, "Failed to init hardware: %d", ret);
		goto err_init;
	}

	return 0;

err_init:
	hns3_cmd_uninit(hw);
	return ret;
}

static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_start          = hns3vf_dev_start,
	.dev_stop           = hns3vf_dev_stop,
	.dev_close          = hns3vf_dev_close,
	.mtu_set            = hns3vf_dev_mtu_set,
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get      = hns3vf_dev_infos_get,
	.rx_queue_setup     = hns3_rx_queue_setup,
	.tx_queue_setup     = hns3_tx_queue_setup,
	.rx_queue_release   = hns3_dev_rx_queue_release,
	.tx_queue_release   = hns3_dev_tx_queue_release,
	.dev_configure      = hns3vf_dev_configure,
	.mac_addr_add       = hns3vf_add_mac_addr,
	.mac_addr_remove    = hns3vf_remove_mac_addr,
	.mac_addr_set       = hns3vf_set_default_mac_addr,
	.set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
	.link_update        = hns3vf_dev_link_update,
	.rss_hash_update    = hns3_dev_rss_hash_update,
	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
	.reta_update        = hns3_dev_rss_reta_update,
	.reta_query         = hns3_dev_rss_reta_query,
	.filter_ctrl        = hns3_dev_filter_ctrl,
	.vlan_filter_set    = hns3vf_vlan_filter_set,
	.vlan_offload_set   = hns3vf_vlan_offload_set,
	.get_reg            = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};

static const struct hns3_reset_ops hns3vf_reset_ops = {
	.reset_service       = hns3vf_reset_service,
	.stop_service        = hns3vf_stop_service,
	.prepare_reset       = hns3vf_prepare_reset,
	.wait_hardware_ready = hns3vf_wait_hardware_ready,
	.reinit_dev          = hns3vf_reinit_dev,
	.restore_conf        = hns3vf_restore_conf,
	.start_service       = hns3vf_start_service,
};

static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_init_secondary();
		hw->secondary_cnt++;
		return 0;
	}

	hns3_mp_init_primary();

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3vf_reset_ops;

	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;

	return ret;
}

static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3vf_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}

static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3vf_dev_init);
}

static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");