net/hns3: remove getting number of queue descriptors from FW
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <linux/pci_regs.h>
6 #include <rte_alarm.h>
7 #include <ethdev_pci.h>
8 #include <rte_io.h>
9 #include <rte_pci.h>
10 #include <rte_vfio.h>
11
12 #include "hns3_ethdev.h"
13 #include "hns3_common.h"
14 #include "hns3_logs.h"
15 #include "hns3_rxtx.h"
16 #include "hns3_regs.h"
17 #include "hns3_intr.h"
18 #include "hns3_dcb.h"
19 #include "hns3_mp.h"
20 #include "hns3_flow.h"
21
22 #define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
23 #define HNS3VF_SERVICE_INTERVAL         1000000 /* us */
24
25 #define HNS3VF_RESET_WAIT_MS    20
26 #define HNS3VF_RESET_WAIT_CNT   2000
27
28 /* Reset related Registers */
29 #define HNS3_GLOBAL_RESET_BIT           0
30 #define HNS3_CORE_RESET_BIT             1
31 #define HNS3_IMP_RESET_BIT              2
32 #define HNS3_FUN_RST_ING_B              0
33
34 enum hns3vf_evt_cause {
35         HNS3VF_VECTOR0_EVENT_RST,
36         HNS3VF_VECTOR0_EVENT_MBX,
37         HNS3VF_VECTOR0_EVENT_OTHER,
38 };
39
40 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
41                                                     uint64_t *levels);
42 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
43 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
44
45 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
46                                   struct rte_ether_addr *mac_addr);
47 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
48                                      struct rte_ether_addr *mac_addr);
49 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
50                                    __rte_unused int wait_to_complete);
51
52 /* set PCI bus mastering */
53 static int
54 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
55 {
56         uint16_t reg;
57         int ret;
58
59         ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
60         if (ret < 0) {
61                 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
62                              PCI_COMMAND);
63                 return ret;
64         }
65
66         if (op)
67                 /* set the master bit */
68                 reg |= PCI_COMMAND_MASTER;
69         else
70                 reg &= ~(PCI_COMMAND_MASTER);
71
72         return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
73 }
74
75 /**
76  * hns3vf_find_pci_capability - look up a capability in the PCI capability list
77  * @device: the PCI device whose configuration space is scanned
78  * @cap: the capability ID to look for
79  * Return the offset of the capability in config space, or 0 if it is absent.
80  */
81 static int
82 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
83 {
84 #define MAX_PCIE_CAPABILITY 48
85         uint16_t status;
86         uint8_t pos;
87         uint8_t id;
88         int ttl;
89         int ret;
90
91         ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
92         if (ret < 0) {
93                 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
94                 return 0;
95         }
96
97         if (!(status & PCI_STATUS_CAP_LIST))
98                 return 0;
99
100         ttl = MAX_PCIE_CAPABILITY;
101         ret = rte_pci_read_config(device, &pos, sizeof(pos),
102                                   PCI_CAPABILITY_LIST);
103         if (ret < 0) {
104                 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
105                              PCI_CAPABILITY_LIST);
106                 return 0;
107         }
108
109         while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
110                 ret = rte_pci_read_config(device, &id, sizeof(id),
111                                           (pos + PCI_CAP_LIST_ID));
112                 if (ret < 0) {
113                         PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
114                                      (pos + PCI_CAP_LIST_ID));
115                         break;
116                 }
117
118                 if (id == 0xFF)
119                         break;
120
121                 if (id == cap)
122                         return (int)pos;
123
124                 ret = rte_pci_read_config(device, &pos, sizeof(pos),
125                                           (pos + PCI_CAP_LIST_NEXT));
126                 if (ret < 0) {
127                         PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
128                                      (pos + PCI_CAP_LIST_NEXT));
129                         break;
130                 }
131         }
132         return 0;
133 }
134
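/*
 * Enable or disable MSI-X for the device by toggling the enable bit in the
 * MSI-X Message Control register, located through the capability walk above.
 */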
135 static int
136 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
137 {
138         uint16_t control;
139         int pos;
140         int ret;
141
142         pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
143         if (pos) {
144                 ret = rte_pci_read_config(device, &control, sizeof(control),
145                                     (pos + PCI_MSIX_FLAGS));
146                 if (ret < 0) {
147                         PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
148                                      (pos + PCI_MSIX_FLAGS));
149                         return -ENXIO;
150                 }
151
152                 if (op)
153                         control |= PCI_MSIX_FLAGS_ENABLE;
154                 else
155                         control &= ~PCI_MSIX_FLAGS_ENABLE;
156                 ret = rte_pci_write_config(device, &control, sizeof(control),
157                                           (pos + PCI_MSIX_FLAGS));
158                 if (ret < 0) {
159                         PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
160                                     (pos + PCI_MSIX_FLAGS));
161                         return -ENXIO;
162                 }
163
164                 return 0;
165         }
166
167         return -ENXIO;
168 }
169
170 static int
171 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
172 {
173         /* The MAC address was checked by the upper level interface */
174         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
175         int ret;
176
177         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
178                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
179                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
180         if (ret) {
181                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
182                                       mac_addr);
183                 hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
184                          mac_str, ret);
185         }
186         return ret;
187 }
188
189 static int
190 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
191 {
192         /* The MAC address was checked by the upper level interface */
193         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
194         int ret;
195
196         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
197                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
198                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
199                                 false, NULL, 0);
200         if (ret) {
201                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
202                                       mac_addr);
203                 hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
204                          mac_str, ret);
205         }
206         return ret;
207 }
208
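/*
 * Replace the default unicast MAC address: the new and old addresses are
 * packed into a single mailbox message so that the PF can replace the old
 * entry with the new one.
 */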
209 static int
210 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
211                             struct rte_ether_addr *mac_addr)
212 {
213 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
214         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
215         struct rte_ether_addr *old_addr;
216         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
217         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
218         int ret;
219
220         /*
221          * The input parameter mac_addr is guaranteed to be a valid address
222          * by the rte layer of the DPDK framework.
223          */
224         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
225         rte_spinlock_lock(&hw->lock);
226         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
227         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
228                RTE_ETHER_ADDR_LEN);
229
230         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
231                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
232                                 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
233         if (ret) {
234                 /*
235                  * The hns3 VF PMD depends on the hns3 PF kernel ethdev
236                  * driver. When a user has configured a MAC address for a VF
237                  * device with an "ip link set ..." command on the PF device,
238                  * the hns3 PF kernel ethdev driver does not allow the VF
239                  * driver to reconfigure a different default MAC address, and
240                  * returns -EPERM to the VF driver through the mailbox.
241                  */
242                 if (ret == -EPERM) {
243                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
244                                               old_addr);
245                         hns3_warn(hw, "Has permanent mac addr(%s) for vf",
246                                   mac_str);
247                 } else {
248                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
249                                               mac_addr);
250                         hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
251                                  mac_str, ret);
252                 }
253         }
254
255         rte_ether_addr_copy(mac_addr,
256                             (struct rte_ether_addr *)hw->mac.mac_addr);
257         rte_spinlock_unlock(&hw->lock);
258
259         return ret;
260 }
261
262 static int
263 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
264                        struct rte_ether_addr *mac_addr)
265 {
266         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
267         int ret;
268
269         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
270                                 HNS3_MBX_MAC_VLAN_MC_ADD,
271                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
272                                 NULL, 0);
273         if (ret) {
274                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
275                                       mac_addr);
276                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
277                          mac_str, ret);
278         }
279
280         return ret;
281 }
282
283 static int
284 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
285                           struct rte_ether_addr *mac_addr)
286 {
287         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
288         int ret;
289
290         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
291                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
292                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
293                                 NULL, 0);
294         if (ret) {
295                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
296                                       mac_addr);
297                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
298                          mac_str, ret);
299         }
300
301         return ret;
302 }
303
304 static int
305 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
306                         bool en_uc_pmc, bool en_mc_pmc)
307 {
308         struct hns3_mbx_vf_to_pf_cmd *req;
309         struct hns3_cmd_desc desc;
310         int ret;
311
312         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
313
314         /*
315          * The hns3 VF PMD depends on the hns3 PF kernel ethdev driver,
316          * so promiscuous/allmulticast mode in the hns3 VF PMD behaves as
317          * follows:
318          * 1. Promiscuous/allmulticast mode can be configured successfully
319          *    only on a trusted VF device. Configuring it on a non-trusted
320          *    VF device will fail. The hns3 VF device can be configured as
321          *    a trusted device by the hns3 PF kernel ethdev driver on the
322          *    host with the following command:
323          *      "ip link set <eth num> vf <vf id> trust on"
324          * 2. After promiscuous mode is configured successfully, the hns3 VF
325          *    PMD can receive both ingress and outgoing traffic: all the
326          *    ingress packets, plus all the packets sent from the PF and
327          *    other VFs on the same physical port.
328          * 3. Note: due to hardware constraints, the VLAN filter is enabled
329          *    by default and cannot be turned off on a VF device, so the VLAN
330          *    filter is still effective even in promiscuous mode. If upper
331          *    applications don't call the rte_eth_dev_vlan_filter API to
332          *    set a VLAN on the VF device, the hns3 VF PMD cannot receive
333          *    VLAN-tagged packets in promiscuous mode.
334          */
335         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
336         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
337         req->msg[1] = en_bc_pmc ? 1 : 0;
338         req->msg[2] = en_uc_pmc ? 1 : 0;
339         req->msg[3] = en_mc_pmc ? 1 : 0;
340         req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
341
342         ret = hns3_cmd_send(hw, &desc, 1);
343         if (ret)
344                 hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
345
346         return ret;
347 }
348
349 static int
350 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
351 {
352         struct hns3_adapter *hns = dev->data->dev_private;
353         struct hns3_hw *hw = &hns->hw;
354         int ret;
355
356         ret = hns3vf_set_promisc_mode(hw, true, true, true);
357         if (ret)
358                 hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
359                         ret);
360         return ret;
361 }
362
363 static int
364 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
365 {
366         bool allmulti = dev->data->all_multicast ? true : false;
367         struct hns3_adapter *hns = dev->data->dev_private;
368         struct hns3_hw *hw = &hns->hw;
369         int ret;
370
371         ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
372         if (ret)
373                 hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
374                         ret);
375         return ret;
376 }
377
378 static int
379 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
380 {
381         struct hns3_adapter *hns = dev->data->dev_private;
382         struct hns3_hw *hw = &hns->hw;
383         int ret;
384
385         if (dev->data->promiscuous)
386                 return 0;
387
388         ret = hns3vf_set_promisc_mode(hw, true, false, true);
389         if (ret)
390                 hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
391                         ret);
392         return ret;
393 }
394
395 static int
396 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
397 {
398         struct hns3_adapter *hns = dev->data->dev_private;
399         struct hns3_hw *hw = &hns->hw;
400         int ret;
401
402         if (dev->data->promiscuous)
403                 return 0;
404
405         ret = hns3vf_set_promisc_mode(hw, true, false, false);
406         if (ret)
407                 hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
408                         ret);
409         return ret;
410 }
411
412 static int
413 hns3vf_restore_promisc(struct hns3_adapter *hns)
414 {
415         struct hns3_hw *hw = &hns->hw;
416         bool allmulti = hw->data->all_multicast ? true : false;
417
418         if (hw->data->promiscuous)
419                 return hns3vf_set_promisc_mode(hw, true, true, true);
420
421         return hns3vf_set_promisc_mode(hw, true, false, allmulti);
422 }
423
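/*
 * Map or unmap a TQP ring to/from a misc interrupt vector by sending a
 * mailbox request to the PF; 'mmap' selects the map or unmap command.
 */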
424 static int
425 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
426                              bool mmap, enum hns3_ring_type queue_type,
427                              uint16_t queue_id)
428 {
429         struct hns3_vf_bind_vector_msg bind_msg;
430         const char *op_str;
431         uint16_t code;
432         int ret;
433
434         memset(&bind_msg, 0, sizeof(bind_msg));
435         code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
436                 HNS3_MBX_UNMAP_RING_TO_VECTOR;
437         bind_msg.vector_id = (uint8_t)vector_id;
438
439         if (queue_type == HNS3_RING_TYPE_RX)
440                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
441         else
442                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
443
444         bind_msg.param[0].ring_type = queue_type;
445         bind_msg.ring_num = 1;
446         bind_msg.param[0].tqp_index = queue_id;
447         op_str = mmap ? "Map" : "Unmap";
448         ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
449                                 sizeof(bind_msg), false, NULL, 0);
450         if (ret)
451                 hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
452                          op_str, queue_id, bind_msg.vector_id, ret);
453
454         return ret;
455 }
456
457 static int
458 hns3vf_dev_configure(struct rte_eth_dev *dev)
459 {
460         struct hns3_adapter *hns = dev->data->dev_private;
461         struct hns3_hw *hw = &hns->hw;
462         struct rte_eth_conf *conf = &dev->data->dev_conf;
463         enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
464         uint16_t nb_rx_q = dev->data->nb_rx_queues;
465         uint16_t nb_tx_q = dev->data->nb_tx_queues;
466         struct rte_eth_rss_conf rss_conf;
467         bool gro_en;
468         int ret;
469
470         hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
471
472         /*
473          * Some versions of the hardware network engine do not support
474          * enabling/disabling/resetting a Tx or Rx queue individually; these
475          * devices must enable/disable/reset Tx and Rx queues at the same
476          * time. When the number of Tx queues allocated by upper applications
477          * differs from the number of Rx queues, the driver sets up fake Tx
478          * or Rx queues to even out the Tx/Rx queue counts; otherwise, the
479          * network engine cannot work as usual. These fake queues are
480          * imperceptible to, and cannot be used by, upper applications.
481          */
482         ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
483         if (ret) {
484                 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
485                 hw->cfg_max_queues = 0;
486                 return ret;
487         }
488
489         hw->adapter_state = HNS3_NIC_CONFIGURING;
490         if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
491                 hns3_err(hw, "setting link speed/duplex not supported");
492                 ret = -EINVAL;
493                 goto cfg_err;
494         }
495
496         /* When RSS is enabled, apply the user's RSS hash configuration */
497         if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
498                 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
499                 hw->rss_dis_flag = false;
500                 rss_conf = conf->rx_adv_conf.rss_conf;
501                 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
502                 if (ret)
503                         goto cfg_err;
504         }
505
506         ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
507         if (ret != 0)
508                 goto cfg_err;
509
510         ret = hns3vf_dev_configure_vlan(dev);
511         if (ret)
512                 goto cfg_err;
513
514         /* config hardware GRO */
515         gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
516         ret = hns3_config_gro(hw, gro_en);
517         if (ret)
518                 goto cfg_err;
519
520         hns3_init_rx_ptype_tble(dev);
521
522         hw->adapter_state = HNS3_NIC_CONFIGURED;
523         return 0;
524
525 cfg_err:
526         hw->cfg_max_queues = 0;
527         (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
528         hw->adapter_state = HNS3_NIC_INITIALIZED;
529
530         return ret;
531 }
532
533 static int
534 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
535 {
536         int ret;
537
538         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
539                                 sizeof(mtu), true, NULL, 0);
540         if (ret)
541                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
542
543         return ret;
544 }
545
546 static int
547 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
548 {
549         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
550         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
551         int ret;
552
553         /*
554          * The hns3 PF/VF devices on the same port share the hardware MTU
555          * configuration. The hns3 VF PMD sends a mailbox message to ask the
556          * hns3 PF kernel ethdev driver to finish the hardware MTU
557          * configuration, so there is no need to stop the port for an hns3 VF
558          * device, and the MTU value issued by the hns3 VF PMD must be less
559          * than or equal to the PF's MTU.
560          */
561         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
562                 hns3_err(hw, "Failed to set mtu during resetting");
563                 return -EIO;
564         }
565
566         /*
567          * When Rx of scattered packets is off, the hns3 PMD may use the
568          * vector Rx process function or the simple Rx function. If the MTU
569          * is increased so that the maximum length of received packets is
570          * greater than the length of a single buffer for an Rx packet, the
571          * hardware network engine needs to use multiple BDs and buffers to
572          * store these packets. This causes problems when the vector or
573          * simple Rx function is still used to receive packets. So, when Rx
574          * of scattered packets is off and the device is started, it is not
575          * permitted to increase the MTU such that the maximum length of Rx
576          * packets is greater than the Rx buffer length.
577          */
578         if (dev->data->dev_started && !dev->data->scattered_rx &&
579             frame_size > hw->rx_buf_len) {
580                 hns3_err(hw, "failed to set mtu because the device is "
581                         "not in scattered Rx mode");
582                 return -EOPNOTSUPP;
583         }
584
585         rte_spinlock_lock(&hw->lock);
586         ret = hns3vf_config_mtu(hw, mtu);
587         if (ret) {
588                 rte_spinlock_unlock(&hw->lock);
589                 return ret;
590         }
591         rte_spinlock_unlock(&hw->lock);
592
593         return 0;
594 }
595
596 static void
597 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
598 {
599         hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
600 }
601
602 static void
603 hns3vf_disable_irq0(struct hns3_hw *hw)
604 {
605         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
606 }
607
608 static void
609 hns3vf_enable_irq0(struct hns3_hw *hw)
610 {
611         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
612 }
613
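/*
 * Decode the vector0 interrupt cause from the CMDQ status register. Reset
 * events take priority over mailbox (CMDQ RX) events; anything else is
 * reported as an 'other' event. *clearval receives the value to write back
 * when clearing the event source.
 */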
614 static enum hns3vf_evt_cause
615 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
616 {
617         struct hns3_hw *hw = &hns->hw;
618         enum hns3vf_evt_cause ret;
619         uint32_t cmdq_stat_reg;
620         uint32_t rst_ing_reg;
621         uint32_t val;
622
623         /* Fetch the events from their corresponding regs */
624         cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
625         if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
626                 rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
627                 hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
628                 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
629                 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
630                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
631                 hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
632                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
633                 if (clearval) {
634                         hw->reset.stats.global_cnt++;
635                         hns3_warn(hw, "Global reset detected, clear reset status");
636                 } else {
637                         hns3_schedule_delayed_reset(hns);
638                         hns3_warn(hw, "Global reset detected, don't clear reset status");
639                 }
640
641                 ret = HNS3VF_VECTOR0_EVENT_RST;
642                 goto out;
643         }
644
645         /* Check for vector0 mailbox(=CMDQ RX) event source */
646         if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
647                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
648                 ret = HNS3VF_VECTOR0_EVENT_MBX;
649                 goto out;
650         }
651
652         val = 0;
653         ret = HNS3VF_VECTOR0_EVENT_OTHER;
654 out:
655         if (clearval)
656                 *clearval = val;
657         return ret;
658 }
659
660 static void
661 hns3vf_interrupt_handler(void *param)
662 {
663         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
664         struct hns3_adapter *hns = dev->data->dev_private;
665         struct hns3_hw *hw = &hns->hw;
666         enum hns3vf_evt_cause event_cause;
667         uint32_t clearval;
668
669         /* Disable interrupt */
670         hns3vf_disable_irq0(hw);
671
672         /* Read out interrupt causes */
673         event_cause = hns3vf_check_event_cause(hns, &clearval);
674         /* Clear interrupt causes */
675         hns3vf_clear_event_cause(hw, clearval);
676
677         switch (event_cause) {
678         case HNS3VF_VECTOR0_EVENT_RST:
679                 hns3_schedule_reset(hns);
680                 break;
681         case HNS3VF_VECTOR0_EVENT_MBX:
682                 hns3_dev_handle_mbx_msg(hw);
683                 break;
684         default:
685                 break;
686         }
687
688         /* Enable interrupt */
689         hns3vf_enable_irq0(hw);
690 }
691
692 static void
693 hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
694 {
695         hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
696         hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
697         hw->rss_key_size = HNS3_RSS_KEY_SIZE;
698         hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
699 }
700
701 static void
702 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
703 {
704         struct hns3_dev_specs_0_cmd *req0;
705
706         req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
707
708         hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
709         hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
710         hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
711         hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
712 }
713
714 static int
715 hns3vf_check_dev_specifications(struct hns3_hw *hw)
716 {
717         if (hw->rss_ind_tbl_size == 0 ||
718             hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
719                 hns3_warn(hw, "the configured size of the hash lookup table (%u)"
720                               " is invalid or exceeds the maximum (%u)", hw->rss_ind_tbl_size,
721                               HNS3_RSS_IND_TBL_SIZE_MAX);
722                 return -EINVAL;
723         }
724
725         return 0;
726 }
727
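/*
 * Query device specifications from firmware. The result spans
 * HNS3_QUERY_DEV_SPECS_BD_NUM descriptors, chained together with
 * HNS3_CMD_FLAG_NEXT on all but the last one.
 */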
728 static int
729 hns3vf_query_dev_specifications(struct hns3_hw *hw)
730 {
731         struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
732         int ret;
733         int i;
734
735         for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
736                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
737                                           true);
738                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
739         }
740         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
741
742         ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
743         if (ret)
744                 return ret;
745
746         hns3vf_parse_dev_specifications(hw, desc);
747
748         return hns3vf_check_dev_specifications(hw);
749 }
750
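/*
 * Record whether the PF can push link status change messages. Only a
 * transition out of the UNKNOWN state can take effect, so the first report
 * received wins.
 */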
751 void
752 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
753 {
754         uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
755                                    HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
756         uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
757         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
758
759         if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
760                 __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
761                                           __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
762 }
763
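/*
 * Probe whether the PF pushes link status changes: send a GET_LINK_STATUS
 * request and poll pf_push_lsc_cap, which the mailbox path updates, for up
 * to HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS before treating push as unsupported.
 */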
764 static void
765 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
766 {
767 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS      500
768
769         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
770         int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
771         uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
772         uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
773         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
774
775         __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
776                          __ATOMIC_RELEASE);
777
778         (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
779                                 NULL, 0);
780
781         while (remain_ms > 0) {
782                 rte_delay_ms(HNS3_POLL_RESPONE_MS);
783                 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
784                         HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
785                         break;
786                 remain_ms--;
787         }
788
789         /*
790          * On exit from the loop above, pf_push_lsc_cap can be in one of
791          * three states: unknown (the PF did not ack), not_supported, or
792          * supported. Configure it as 'not_supported' if it is still 'unknown'.
793          */
794         __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
795                                   __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
796
797         if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
798                 HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
799                 hns3_info(hw, "detected that PF supports push link status change!");
800         } else {
801                 /*
802                  * The framework already set the RTE_ETH_DEV_INTR_LSC bit
803                  * because the driver declared RTE_PCI_DRV_INTR_LSC in its
804                  * drv_flags, so clear the RTE_ETH_DEV_INTR_LSC capability here.
805                  */
806                 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
807         }
808 }
809
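/*
 * Determine device capabilities from the PCI revision ID: revisions below
 * PCI_REVISION_ID_HIP09_A use built-in defaults, while later revisions
 * query the device specifications from firmware.
 */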
810 static int
811 hns3vf_get_capability(struct hns3_hw *hw)
812 {
813         struct rte_pci_device *pci_dev;
814         struct rte_eth_dev *eth_dev;
815         uint8_t revision;
816         int ret;
817
818         eth_dev = &rte_eth_devices[hw->data->port_id];
819         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
820
821         /* Get PCI revision id */
822         ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
823                                   HNS3_PCI_REVISION_ID);
824         if (ret != HNS3_PCI_REVISION_ID_LEN) {
825                 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
826                              ret);
827                 return -EIO;
828         }
829         hw->revision = revision;
830
831         if (revision < PCI_REVISION_ID_HIP09_A) {
832                 hns3vf_set_default_dev_specifications(hw);
833                 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
834                 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
835                 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
836                 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
837                 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
838                 hw->rss_info.ipv6_sctp_offload_supported = false;
839                 hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
840                 return 0;
841         }
842
843         ret = hns3vf_query_dev_specifications(hw);
844         if (ret) {
845                 PMD_INIT_LOG(ERR,
846                              "failed to query dev specifications, ret = %d",
847                              ret);
848                 return ret;
849         }
850
851         hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
852         hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
853         hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
854         hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
855         hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
856         hw->rss_info.ipv6_sctp_offload_supported = true;
857         hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
858
859         return 0;
860 }
861
862 static int
863 hns3vf_check_tqp_info(struct hns3_hw *hw)
864 {
865         if (hw->tqps_num == 0) {
866                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
867                 return -EINVAL;
868         }
869
870         if (hw->rss_size_max == 0) {
871                 PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
872                 return -EINVAL;
873         }
874
875         hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
876
877         return 0;
878 }
879
880 static int
881 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
882 {
883         uint8_t resp_msg;
884         int ret;
885
886         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
887                                 HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
888                                 true, &resp_msg, sizeof(resp_msg));
889         if (ret) {
890                 if (ret == -ETIME) {
891                         /*
892                          * Failing to get the current port based VLAN state
893                          * from the PF driver will not affect the VF driver's
894                          * basic function. Because the VF driver relies on the
895                          * hns3 PF kernel ethdev driver, no failure is returned
896                          * when the return value is ETIME, to avoid introducing
897                          * compatibility issues with older versions of the PF
898                          * driver. This return value covers these scenarios:
899                          * 1) the firmware didn't return the results in time
900                          * 2) the result returned by the firmware timed out
901                          * 3) an older version of the kernel side PF driver
902                          *    does not support this mailbox message.
903                          * For scenarios 1 and 2, it is most likely that a
904                          * hardware error or a hardware reset has occurred; in
905                          * that case, these errors will be caught by other
906                          * functions.
907                          */
908                         PMD_INIT_LOG(WARNING,
909                                 "failed to get PVID state for timeout, maybe "
910                                 "kernel side PF driver doesn't support this "
911                                 "mailbox message, or firmware didn't respond.");
912                         resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
913                 } else {
914                         PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
915                                 " ret = %d", ret);
916                         return ret;
917                 }
918         }
919         hw->port_base_vlan_cfg.state = resp_msg ?
920                 HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
921         return 0;
922 }
923
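/*
 * Fetch TQP info from the PF over the mailbox. Only bytes 0-1 (tqps_num)
 * and bytes 2-3 (rss_size_max) of the response are consumed; the number of
 * queue descriptors is no longer read from firmware here.
 */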
924 static int
925 hns3vf_get_queue_info(struct hns3_hw *hw)
926 {
927 #define HNS3VF_TQPS_RSS_INFO_LEN        6
928         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
929         int ret;
930
931         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
932                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
933         if (ret) {
934                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
935                 return ret;
936         }
937
938         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
939         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
940
941         return hns3vf_check_tqp_info(hw);
942 }
943
944 static void
945 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
946 {
947         if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
948                 hns3_set_bit(hw->capability,
949                                 HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
950 }
951
952 static int
953 hns3vf_get_num_tc(struct hns3_hw *hw)
954 {
955         uint8_t num_tc = 0;
956         uint32_t i;
957
958         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
959                 if (hw->hw_tc_map & BIT(i))
960                         num_tc++;
961         }
962         return num_tc;
963 }
964
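/*
 * Fetch basic info (TC map, PF/VF interface version, capability bits) from
 * the PF and derive the number of enabled TCs from the TC bitmap.
 */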
965 static int
966 hns3vf_get_basic_info(struct hns3_hw *hw)
967 {
968         uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
969         struct hns3_basic_info *basic_info;
970         int ret;
971
972         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0,
973                                 true, resp_msg, sizeof(resp_msg));
974         if (ret) {
975                 hns3_err(hw, "failed to get basic info from PF, ret = %d.",
976                                 ret);
977                 return ret;
978         }
979
980         basic_info = (struct hns3_basic_info *)resp_msg;
981         hw->hw_tc_map = basic_info->hw_tc_map;
982         hw->num_tc = hns3vf_get_num_tc(hw);
983         hw->pf_vf_if_version = basic_info->pf_vf_if_version;
984         hns3vf_update_caps(hw, basic_info->caps);
985
986         return 0;
987 }
988
989 static int
990 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
991 {
992         uint8_t host_mac[RTE_ETHER_ADDR_LEN];
993         int ret;
994
995         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
996                                 true, host_mac, RTE_ETHER_ADDR_LEN);
997         if (ret) {
998                 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
999                 return ret;
1000         }
1001
1002         memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
1003
1004         return 0;
1005 }
1006
1007 static int
1008 hns3vf_get_configuration(struct hns3_hw *hw)
1009 {
1010         int ret;
1011
1012         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1013         hw->rss_dis_flag = false;
1014
1015         /* Get device capability */
1016         ret = hns3vf_get_capability(hw);
1017         if (ret) {
1018                 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1019                 return ret;
1020         }
1021
1022         hns3vf_get_push_lsc_cap(hw);
1023
1024         /* Get basic info from PF */
1025         ret = hns3vf_get_basic_info(hw);
1026         if (ret)
1027                 return ret;
1028
1029         /* Get queue configuration from PF */
1030         ret = hns3vf_get_queue_info(hw);
1031         if (ret)
1032                 return ret;
1033
1034         /* Get user defined VF MAC addr from PF */
1035         ret = hns3vf_get_host_mac_addr(hw);
1036         if (ret)
1037                 return ret;
1038
1039         return hns3vf_get_port_base_vlan_filter_state(hw);
1040 }
1041
1042 static int
1043 hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
1044                             uint16_t nb_tx_q)
1045 {
1046         struct hns3_hw *hw = &hns->hw;
1047
1048         return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1049 }
1050
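/*
 * Ask the PF for the current link status. The request is skipped during a
 * reset, and also once the PF is known to push link status changes and the
 * initial request budget (req_link_info_cnt) has been used up.
 */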
1051 static void
1052 hns3vf_request_link_info(struct hns3_hw *hw)
1053 {
1054         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1055         bool send_req;
1056         int ret;
1057
1058         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1059                 return;
1060
1061         send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1062                    vf->req_link_info_cnt > 0;
1063         if (!send_req)
1064                 return;
1065
1066         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1067                                 NULL, 0);
1068         if (ret) {
1069                 hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1070                 return;
1071         }
1072
1073         if (vf->req_link_info_cnt > 0)
1074                 vf->req_link_info_cnt--;
1075 }
1076
1077 void
1078 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1079                           uint32_t link_speed, uint8_t link_duplex)
1080 {
1081         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1082         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1083         struct hns3_mac *mac = &hw->mac;
1084         int ret;
1085
1086         /*
1087          * The PF kernel driver may push the link status while the VF driver
1088          * is resetting. The driver stops the polling job in this case and
1089          * restarts it after the reset is done. Once the polling job has
1090          * started, the driver gets the initial link status by sending a
1091          * request to the PF kernel driver, then updates the link status by
1092          * processing the PF's link status mailbox messages.
1093          */
1094         if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1095                 return;
1096
1097         if (hw->adapter_state != HNS3_NIC_STARTED)
1098                 return;
1099
1100         mac->link_status = link_status;
1101         mac->link_speed = link_speed;
1102         mac->link_duplex = link_duplex;
1103         ret = hns3vf_dev_link_update(dev, 0);
1104         if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1105                 hns3_start_report_lse(dev);
1106 }
1107
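/*
 * Add or remove one VLAN filter entry via the mailbox. Message layout:
 * byte 0 carries the is_kill flag, bytes 1-2 the VLAN ID and bytes 3-4 the
 * protocol (big-endian 802.1Q ethertype).
 */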
1108 static int
1109 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1110 {
1111 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1112         struct hns3_hw *hw = &hns->hw;
1113         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1114         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1115         uint8_t is_kill = on ? 0 : 1;
1116
1117         msg_data[0] = is_kill;
1118         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1119         memcpy(&msg_data[3], &proto, sizeof(proto));
1120
1121         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1122                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1123                                  0);
1124 }
1125
1126 static int
1127 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1128 {
1129         struct hns3_adapter *hns = dev->data->dev_private;
1130         struct hns3_hw *hw = &hns->hw;
1131         int ret;
1132
1133         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1134                 hns3_err(hw,
1135                          "vf set vlan id failed during resetting, vlan_id = %u",
1136                          vlan_id);
1137                 return -EIO;
1138         }
1139         rte_spinlock_lock(&hw->lock);
1140         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1141         rte_spinlock_unlock(&hw->lock);
1142         if (ret)
1143                 hns3_err(hw, "vf set vlan id failed, vlan_id = %u, ret = %d",
1144                          vlan_id, ret);
1145
1146         return ret;
1147 }
1148
1149 static int
1150 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1151 {
1152         uint8_t msg_data;
1153         int ret;
1154
1155         if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1156                 return 0;
1157
1158         msg_data = enable ? 1 : 0;
1159         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1160                         HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data,
1161                         sizeof(msg_data), true, NULL, 0);
1162         if (ret)
1163                 hns3_err(hw, "%s vlan filter failed, ret = %d.",
1164                                 enable ? "enable" : "disable", ret);
1165
1166         return ret;
1167 }
1168
1169 static int
1170 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1171 {
1172         uint8_t msg_data;
1173         int ret;
1174
1175         msg_data = enable ? 1 : 0;
1176         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1177                                 &msg_data, sizeof(msg_data), false, NULL, 0);
1178         if (ret)
1179                 hns3_err(hw, "vf %s strip failed, ret = %d.",
1180                                 enable ? "enable" : "disable", ret);
1181
1182         return ret;
1183 }
1184
1185 static int
1186 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1187 {
1188         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1189         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1190         unsigned int tmp_mask;
1191         int ret = 0;
1192
1193         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1194                 hns3_err(hw, "vf set vlan offload failed during resetting, "
1195                              "mask = 0x%x", mask);
1196                 return -EIO;
1197         }
1198
1199         tmp_mask = (unsigned int)mask;
1200
1201         if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1202                 rte_spinlock_lock(&hw->lock);
1203                 /* Enable or disable VLAN filter */
1204                 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1205                         ret = hns3vf_en_vlan_filter(hw, true);
1206                 else
1207                         ret = hns3vf_en_vlan_filter(hw, false);
1208                 rte_spinlock_unlock(&hw->lock);
1209                 if (ret)
1210                         return ret;
1211         }
1212
1213         /* Vlan stripping setting */
1214         if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1215                 rte_spinlock_lock(&hw->lock);
1216                 /* Enable or disable VLAN stripping */
1217                 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1218                         ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1219                 else
1220                         ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1221                 rte_spinlock_unlock(&hw->lock);
1222         }
1223
1224         return ret;
1225 }
1226
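/*
 * Walk the 64-bit words of the VLAN filter bitmap and apply 'on' to every
 * VLAN ID whose bit is set; used, for example, to remove the whole table.
 */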
1227 static int
1228 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1229 {
1230         struct rte_vlan_filter_conf *vfc;
1231         struct hns3_hw *hw = &hns->hw;
1232         uint16_t vlan_id;
1233         uint64_t vbit;
1234         uint64_t ids;
1235         int ret = 0;
1236         uint32_t i;
1237
1238         vfc = &hw->data->vlan_filter_conf;
1239         for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1240                 if (vfc->ids[i] == 0)
1241                         continue;
1242                 ids = vfc->ids[i];
1243                 while (ids) {
1244                         /*
1245                          * ids holds 64 bits; each bit corresponds to one
1246                          * VLAN ID, so entry i covers VLAN IDs from 64 * i.
1247                          */
1248                         vlan_id = 64 * i;
1249                         /* count trailing zeroes */
1250                         vbit = ~ids & (ids - 1);
1251                         /* clear least significant bit set */
1252                         ids ^= (ids ^ (ids - 1)) ^ vbit;
1253                         for (; vbit;) {
1254                                 vbit >>= 1;
1255                                 vlan_id++;
1256                         }
1257                         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1258                         if (ret) {
1259                                 hns3_err(hw,
1260                                          "VF handle vlan table failed, ret =%d, on = %d",
1261                                          ret, on);
1262                                 return ret;
1263                         }
1264                 }
1265         }
1266
1267         return ret;
1268 }
1269
1270 static int
1271 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1272 {
1273         return hns3vf_handle_all_vlan_table(hns, 0);
1274 }
1275
1276 static int
1277 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1278 {
1279         struct hns3_hw *hw = &hns->hw;
1280         struct rte_eth_conf *dev_conf;
1281         bool en;
1282         int ret;
1283
1284         dev_conf = &hw->data->dev_conf;
1285         en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1286                                                                    : false;
1287         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1288         if (ret)
1289                 hns3_err(hw, "VF restore vlan conf fail, en = %d, ret = %d", en,
1290                          ret);
1291         return ret;
1292 }
1293
1294 static int
1295 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1296 {
1297         struct hns3_adapter *hns = dev->data->dev_private;
1298         struct rte_eth_dev_data *data = dev->data;
1299         struct hns3_hw *hw = &hns->hw;
1300         int ret;
1301
1302         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1303             data->dev_conf.txmode.hw_vlan_reject_untagged ||
1304             data->dev_conf.txmode.hw_vlan_insert_pvid) {
1305                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1306                               "and hw_vlan_insert_pvid are not supported!");
1307         }
1308
1309         /* Apply vlan offload setting */
1310         ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1311                                         RTE_ETH_VLAN_FILTER_MASK);
1312         if (ret)
1313                 hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1314
1315         return ret;
1316 }
1317
1318 static int
1319 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1320 {
1321         uint8_t msg_data;
1322
1323         msg_data = alive ? 1 : 0;
1324         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1325                                  sizeof(msg_data), false, NULL, 0);
1326 }
1327
1328 static void
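/*
 * Periodic alarm callback: send a keep alive mailbox message to the PF and
 * re-arm the alarm to fire again after HNS3VF_KEEP_ALIVE_INTERVAL.
 */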
1329 hns3vf_keep_alive_handler(void *param)
1330 {
1331         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1332         struct hns3_adapter *hns = eth_dev->data->dev_private;
1333         struct hns3_hw *hw = &hns->hw;
1334         int ret;
1335
1336         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1337                                 false, NULL, 0);
1338         if (ret)
1339                 hns3_err(hw, "VF failed to send keep alive cmd, ret = %d",
1340                          ret);
1341
1342         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1343                           eth_dev);
1344 }
1345
1346 static void
1347 hns3vf_service_handler(void *param)
1348 {
1349         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1350         struct hns3_adapter *hns = eth_dev->data->dev_private;
1351         struct hns3_hw *hw = &hns->hw;
1352
1353         /*
1354          * The query link status and reset processing are executed in the
1355          * interrupt thread. When the IMP reset occurs, IMP will not respond,
1356          * and the query operation will time out after 30ms. In the case of
1357          * multiple PF/VFs, each query failure timeout causes the IMP reset
1358          * interrupt to fail to respond within 100ms.
1359          * Before querying the link status, check whether there is a reset
1360          * pending, and if so, abandon the query.
1361          */
1362         if (!hns3vf_is_reset_pending(hns))
1363                 hns3vf_request_link_info(hw);
1364         else
1365                 hns3_warn(hw, "Cancel the query when reset is pending");
1366
1367         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1368                           eth_dev);
1369 }
1370
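/*
 * Start the periodic service job. When the PF pushes link status changes,
 * only HNS3_REQUEST_LINK_INFO_REMAINS_CNT initial link info requests are
 * issued to seed the link state before relying on pushed updates.
 */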
1371 static void
1372 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1373 {
1374 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT      3
1375
1376         struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1377
1378         if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1379                 vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1380
1381         __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1382
1383         hns3vf_service_handler(dev);
1384 }
1385
1386 static void
1387 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1388 {
1389         struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1390
1391         rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1392
1393         __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1394 }
1395
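/*
 * Query the VF's interrupt vector budget from firmware and reject any
 * configuration providing fewer than HNS3_MIN_VECTOR_NUM vectors.
 */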
1396 static int
1397 hns3_query_vf_resource(struct hns3_hw *hw)
1398 {
1399         struct hns3_vf_res_cmd *req;
1400         struct hns3_cmd_desc desc;
1401         uint16_t num_msi;
1402         int ret;
1403
1404         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1405         ret = hns3_cmd_send(hw, &desc, 1);
1406         if (ret) {
1407                 hns3_err(hw, "query vf resource failed, ret = %d", ret);
1408                 return ret;
1409         }
1410
1411         req = (struct hns3_vf_res_cmd *)desc.data;
1412         num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1413                                  HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1414         if (num_msi < HNS3_MIN_VECTOR_NUM) {
1415                 hns3_err(hw, "only %u MSI resources, not enough for VF (min: %d)",
1416                          num_msi, HNS3_MIN_VECTOR_NUM);
1417                 return -EINVAL;
1418         }
1419
1420         hw->num_msi = num_msi;
1421
1422         return 0;
1423 }
1424
1425 static int
1426 hns3vf_init_hardware(struct hns3_adapter *hns)
1427 {
1428         struct hns3_hw *hw = &hns->hw;
1429         uint16_t mtu = hw->data->mtu;
1430         int ret;
1431
1432         ret = hns3vf_set_promisc_mode(hw, true, false, false);
1433         if (ret)
1434                 return ret;
1435
1436         ret = hns3vf_config_mtu(hw, mtu);
1437         if (ret)
1438                 goto err_init_hardware;
1439
1440         ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1441         if (ret) {
1442                 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1443                 goto err_init_hardware;
1444         }
1445
1446         ret = hns3_config_gro(hw, false);
1447         if (ret) {
1448                 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1449                 goto err_init_hardware;
1450         }
1451
1452         /*
1453          * During initialization, all hardware mappings between queues and
1454          * interrupt vectors need to be cleared, so that errors caused by
1455          * residual configurations, such as unexpected interrupts, can be
1456          * avoided.
1457          */
1458         ret = hns3_init_ring_with_vector(hw);
1459         if (ret) {
1460                 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1461                 goto err_init_hardware;
1462         }
1463
1464         return 0;
1465
1466 err_init_hardware:
1467         (void)hns3vf_set_promisc_mode(hw, false, false, false);
1468         return ret;
1469 }
1470
1471 static int
1472 hns3vf_clear_vport_list(struct hns3_hw *hw)
1473 {
1474         return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1475                                  HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1476                                  NULL, 0);
1477 }
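
/*
 * Illustrative sketch, not part of the driver: VF-to-PF requests all go
 * through hns3_send_mbx_msg(). A fire-and-forget request, as above, passes
 * need_resp = false and no buffers; a synchronous query supplies a response
 * buffer. EXAMPLE_CODE/EXAMPLE_SUBCODE are placeholders.
 *
 *	uint8_t resp[8];
 *	int ret;
 *
 *	// blocks until the PF answers or the mailbox times out
 *	ret = hns3_send_mbx_msg(hw, EXAMPLE_CODE, EXAMPLE_SUBCODE,
 *				NULL, 0, true, resp, sizeof(resp));
 */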
1478
1479 static int
1480 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1481 {
1482         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1483         struct hns3_adapter *hns = eth_dev->data->dev_private;
1484         struct hns3_hw *hw = &hns->hw;
1485         int ret;
1486
1487         PMD_INIT_FUNC_TRACE();
1488
1489         /* Get the hardware I/O base address from PCIe BAR2 I/O space */
1490         hw->io_base = pci_dev->mem_resource[2].addr;
1491
1492         /* Firmware command queue initialize */
1493         ret = hns3_cmd_init_queue(hw);
1494         if (ret) {
1495                 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1496                 goto err_cmd_init_queue;
1497         }
1498
1499         /* Firmware command initialize */
1500         ret = hns3_cmd_init(hw);
1501         if (ret) {
1502                 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1503                 goto err_cmd_init;
1504         }
1505
1506         hns3_tx_push_init(eth_dev);
1507
1508         /* Get VF resource */
1509         ret = hns3_query_vf_resource(hw);
1510         if (ret)
1511                 goto err_cmd_init;
1512
1513         rte_spinlock_init(&hw->mbx_resp.lock);
1514
1515         hns3vf_clear_event_cause(hw, 0);
1516
1517         ret = rte_intr_callback_register(pci_dev->intr_handle,
1518                                          hns3vf_interrupt_handler, eth_dev);
1519         if (ret) {
1520                 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1521                 goto err_intr_callback_register;
1522         }
1523
1524         /* Enable interrupt */
1525         rte_intr_enable(pci_dev->intr_handle);
1526         hns3vf_enable_irq0(hw);
1527
1528         /* Get configuration from PF */
1529         ret = hns3vf_get_configuration(hw);
1530         if (ret) {
1531                 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1532                 goto err_get_config;
1533         }
1534
1535         ret = hns3_tqp_stats_init(hw);
1536         if (ret)
1537                 goto err_get_config;
1538
1539         /* Clear the hardware statistics recorded in the imissed registers. */
1540         ret = hns3_update_imissed_stats(hw, true);
1541         if (ret) {
1542                 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1543                 goto err_set_tc_queue;
1544         }
1545
1546         ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
1547         if (ret) {
1548                 PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1549                 goto err_set_tc_queue;
1550         }
1551
1552         ret = hns3vf_clear_vport_list(hw);
1553         if (ret) {
1554                 PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1555                 goto err_set_tc_queue;
1556         }
1557
1558         ret = hns3vf_init_hardware(hns);
1559         if (ret)
1560                 goto err_set_tc_queue;
1561
1562         hns3_rss_set_default_args(hw);
1563
1564         ret = hns3vf_set_alive(hw, true);
1565         if (ret) {
1566                 PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
1567                 goto err_set_tc_queue;
1568         }
1569
1570         return 0;
1571
1572 err_set_tc_queue:
1573         hns3_tqp_stats_uninit(hw);
1574
1575 err_get_config:
1576         hns3vf_disable_irq0(hw);
1577         rte_intr_disable(pci_dev->intr_handle);
1578         hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1579                              eth_dev);
1580 err_intr_callback_register:
1581 err_cmd_init:
1582         hns3_cmd_uninit(hw);
1583         hns3_cmd_destroy_queue(hw);
1584 err_cmd_init_queue:
1585         hw->io_base = NULL;
1586
1587         return ret;
1588 }
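
/*
 * Illustrative sketch, not part of the driver: hns3vf_init_vf() above uses
 * the kernel-style goto ladder, where each label releases everything
 * acquired before the failing step, in reverse order of acquisition:
 *
 *	ret = acquire_a();		// step 1 (hypothetical helpers)
 *	if (ret)
 *		return ret;
 *	ret = acquire_b();		// step 2
 *	if (ret)
 *		goto err_b;
 *	ret = acquire_c();		// step 3
 *	if (ret)
 *		goto err_c;
 *	return 0;
 *
 * err_c:
 *	release_b();
 * err_b:
 *	release_a();
 *	return ret;
 */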
1589
1590 static void
1591 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1592 {
1593         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1594         struct hns3_adapter *hns = eth_dev->data->dev_private;
1595         struct hns3_hw *hw = &hns->hw;
1596
1597         PMD_INIT_FUNC_TRACE();
1598
1599         hns3_rss_uninit(hns);
1600         (void)hns3_config_gro(hw, false);
1601         (void)hns3vf_set_alive(hw, false);
1602         (void)hns3vf_set_promisc_mode(hw, false, false, false);
1603         hns3_flow_uninit(eth_dev);
1604         hns3_tqp_stats_uninit(hw);
1605         hns3vf_disable_irq0(hw);
1606         rte_intr_disable(pci_dev->intr_handle);
1607         hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1608                              eth_dev);
1609         hns3_cmd_uninit(hw);
1610         hns3_cmd_destroy_queue(hw);
1611         hw->io_base = NULL;
1612 }
1613
1614 static int
1615 hns3vf_do_stop(struct hns3_adapter *hns)
1616 {
1617         struct hns3_hw *hw = &hns->hw;
1618         int ret;
1619
1620         hw->mac.link_status = RTE_ETH_LINK_DOWN;
1621
1622         /*
1623          * The "hns3vf_do_stop" function is also called by .stop_service to
1624          * prepare for a reset. During a global or IMP reset, commands cannot
1625          * be sent to stop the Tx/Rx queues, and the mbufs in those queues may
1626          * still be accessed during the reset process. So the mbufs cannot be
1627          * released during the reset and must be released after the reset is
1628          * completed.
1629          */
1630         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
1631                 hns3_dev_release_mbufs(hns);
1632
1633         if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1634                 hns3_configure_all_mac_addr(hns, true);
1635                 ret = hns3_reset_all_tqps(hns);
1636                 if (ret) {
1637                         hns3_err(hw, "failed to reset all queues, ret = %d",
1638                                  ret);
1639                         return ret;
1640                 }
1641         }
1642         return 0;
1643 }
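
/*
 * Illustrative sketch, not part of the driver: the reset flags checked
 * above ("resetting", "disable_cmd") are plain integers accessed with the
 * GCC atomic builtins, so fast-path readers avoid any locking. A minimal
 * version of the same pattern:
 *
 *	uint16_t resetting;	// shared flag, written by the reset service
 *
 *	__atomic_store_n(&resetting, 1, __ATOMIC_RELAXED);	// writer
 *	if (__atomic_load_n(&resetting, __ATOMIC_RELAXED))	// reader
 *		return;		// skip the work while a reset is running
 */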
1644
1645 static int
1646 hns3vf_dev_stop(struct rte_eth_dev *dev)
1647 {
1648         struct hns3_adapter *hns = dev->data->dev_private;
1649         struct hns3_hw *hw = &hns->hw;
1650
1651         PMD_INIT_FUNC_TRACE();
1652         dev->data->dev_started = 0;
1653
1654         hw->adapter_state = HNS3_NIC_STOPPING;
1655         hns3_set_rxtx_function(dev);
1656         rte_wmb();
1657         /* Disable datapath on secondary process. */
1658         hns3_mp_req_stop_rxtx(dev);
1659         /* Prevent crashes when queues are still in use. */
1660         rte_delay_ms(hw->cfg_max_queues);
1661
1662         rte_spinlock_lock(&hw->lock);
1663         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1664                 hns3_stop_tqps(hw);
1665                 hns3vf_do_stop(hns);
1666                 hns3_unmap_rx_interrupt(dev);
1667                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1668         }
1669         hns3_rx_scattered_reset(dev);
1670         hns3vf_stop_poll_job(dev);
1671         hns3_stop_report_lse(dev);
1672         rte_spinlock_unlock(&hw->lock);
1673
1674         return 0;
1675 }
1676
1677 static int
1678 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1679 {
1680         struct hns3_adapter *hns = eth_dev->data->dev_private;
1681         struct hns3_hw *hw = &hns->hw;
1682         int ret = 0;
1683
1684         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1685                 hns3_mp_uninit(eth_dev);
1686                 return 0;
1687         }
1688
1689         if (hw->adapter_state == HNS3_NIC_STARTED)
1690                 ret = hns3vf_dev_stop(eth_dev);
1691
1692         hw->adapter_state = HNS3_NIC_CLOSING;
1693         hns3_reset_abort(hns);
1694         hw->adapter_state = HNS3_NIC_CLOSED;
1695         rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1696         hns3_configure_all_mc_mac_addr(hns, true);
1697         hns3vf_remove_all_vlan_table(hns);
1698         hns3vf_uninit_vf(eth_dev);
1699         hns3_free_all_queues(eth_dev);
1700         rte_free(hw->reset.wait_data);
1701         hns3_mp_uninit(eth_dev);
1702         hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1703
1704         return ret;
1705 }
1706
1707 static int
1708 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1709                        __rte_unused int wait_to_complete)
1710 {
1711         struct hns3_adapter *hns = eth_dev->data->dev_private;
1712         struct hns3_hw *hw = &hns->hw;
1713         struct hns3_mac *mac = &hw->mac;
1714         struct rte_eth_link new_link;
1715
1716         memset(&new_link, 0, sizeof(new_link));
1717         switch (mac->link_speed) {
1718         case RTE_ETH_SPEED_NUM_10M:
1719         case RTE_ETH_SPEED_NUM_100M:
1720         case RTE_ETH_SPEED_NUM_1G:
1721         case RTE_ETH_SPEED_NUM_10G:
1722         case RTE_ETH_SPEED_NUM_25G:
1723         case RTE_ETH_SPEED_NUM_40G:
1724         case RTE_ETH_SPEED_NUM_50G:
1725         case RTE_ETH_SPEED_NUM_100G:
1726         case RTE_ETH_SPEED_NUM_200G:
1727                 if (mac->link_status)
1728                         new_link.link_speed = mac->link_speed;
1729                 break;
1730         default:
1731                 if (mac->link_status)
1732                         new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1733                 break;
1734         }
1735
1736         if (!mac->link_status)
1737                 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1738
1739         new_link.link_duplex = mac->link_duplex;
1740         new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1741         new_link.link_autoneg =
1742             !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1743
1744         return rte_eth_linkstatus_set(eth_dev, &new_link);
1745 }
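
/*
 * Illustrative sketch, not part of the driver: an application consumes the
 * link state published by rte_eth_linkstatus_set() above through the
 * generic ethdev API; "port_id" is an assumption for the example.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *	    link.link_status == RTE_ETH_LINK_UP)
 *		printf("port %u up at %s\n", port_id,
 *		       rte_eth_link_speed_to_str(link.link_speed));
 */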
1746
1747 static int
1748 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1749 {
1750         struct hns3_hw *hw = &hns->hw;
1751         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1752         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1753         int ret;
1754
1755         ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
1756         if (ret)
1757                 return ret;
1758
1759         hns3_enable_rxd_adv_layout(hw);
1760
1761         ret = hns3_init_queues(hns, reset_queue);
1762         if (ret)
1763                 hns3_err(hw, "failed to init queues, ret = %d.", ret);
1764
1765         return ret;
1766 }
1767
1768 static void
1769 hns3vf_restore_filter(struct rte_eth_dev *dev)
1770 {
1771         hns3_restore_rss_filter(dev);
1772 }
1773
1774 static int
1775 hns3vf_dev_start(struct rte_eth_dev *dev)
1776 {
1777         struct hns3_adapter *hns = dev->data->dev_private;
1778         struct hns3_hw *hw = &hns->hw;
1779         int ret;
1780
1781         PMD_INIT_FUNC_TRACE();
1782         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1783                 return -EBUSY;
1784
1785         rte_spinlock_lock(&hw->lock);
1786         hw->adapter_state = HNS3_NIC_STARTING;
1787         ret = hns3vf_do_start(hns, true);
1788         if (ret) {
1789                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1790                 rte_spinlock_unlock(&hw->lock);
1791                 return ret;
1792         }
1793         ret = hns3_map_rx_interrupt(dev);
1794         if (ret)
1795                 goto map_rx_inter_err;
1796
1797         /*
1798          * There are three registers used to control the status of a TQP
1799          * (which contains a pair of Tx queue and Rx queue) in the new version
1800          * network engine. One controls the enabling of the Tx queue, another
1801          * controls the enabling of the Rx queue, and the last is the master
1802          * switch controlling the enabling of the whole TQP. The Tx register
1803          * and the TQP register must both be enabled to enable a Tx queue;
1804          * the same applies to the Rx queue. For the older network engine,
1805          * this function only refreshes the enabled flag, which is used to
1806          * update the queue status in the DPDK framework.
1807          */
1808         ret = hns3_start_all_txqs(dev);
1809         if (ret)
1810                 goto map_rx_inter_err;
1811
1812         ret = hns3_start_all_rxqs(dev);
1813         if (ret)
1814                 goto start_all_rxqs_fail;
1815
1816         hw->adapter_state = HNS3_NIC_STARTED;
1817         rte_spinlock_unlock(&hw->lock);
1818
1819         hns3_rx_scattered_calc(dev);
1820         hns3_set_rxtx_function(dev);
1821         hns3_mp_req_start_rxtx(dev);
1822
1823         hns3vf_restore_filter(dev);
1824
1825         /* Enable interrupt of all rx queues before enabling queues */
1826         hns3_dev_all_rx_queue_intr_enable(hw, true);
1827         hns3_start_tqps(hw);
1828
1829         if (dev->data->dev_conf.intr_conf.lsc != 0)
1830                 hns3vf_dev_link_update(dev, 0);
1831         hns3vf_start_poll_job(dev);
1832
1833         return ret;
1834
1835 start_all_rxqs_fail:
1836         hns3_stop_all_txqs(dev);
1837 map_rx_inter_err:
1838         (void)hns3vf_do_stop(hns);
1839         hw->adapter_state = HNS3_NIC_CONFIGURED;
1840         rte_spinlock_unlock(&hw->lock);
1841
1842         return ret;
1843 }
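
/*
 * Illustrative sketch, not part of the driver: the three-register TQP
 * control described in hns3vf_dev_start() behaves like switches in series;
 * a Tx (or Rx) queue only forwards when its own enable bit and the TQP
 * master switch are both set. Register names below are placeholders.
 *
 *	bool txq_running = read_bit(TXQ_EN_REG) && read_bit(TQP_EN_REG);
 *	bool rxq_running = read_bit(RXQ_EN_REG) && read_bit(TQP_EN_REG);
 */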
1844
1845 static bool
1846 is_vf_reset_done(struct hns3_hw *hw)
1847 {
1848 #define HNS3_FUN_RST_ING_BITS \
1849         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1850          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1851          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1852          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1853
1854         uint32_t val;
1855
1856         if (hw->reset.level == HNS3_VF_RESET) {
1857                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1858                 if (val & HNS3_VF_RST_ING_BIT)
1859                         return false;
1860         } else {
1861                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1862                 if (val & HNS3_FUN_RST_ING_BITS)
1863                         return false;
1864         }
1865         return true;
1866 }
1867
1868 bool
1869 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1870 {
1871         struct hns3_hw *hw = &hns->hw;
1872         enum hns3_reset_level reset;
1873
1874         /*
1875          * According to the PCIe specification, an FLR on a PF device resets
1876          * the PF state as well as the SR-IOV extended capability, including
1877          * VF Enable, which means that the VFs no longer exist.
1878          *
1879          * HNS3_VF_FULL_RESET means the PF device is undergoing FLR. While
1880          * the PF is in the FLR stage, the register state of the VF device is
1881          * unreliable, so register state detection cannot be carried out. In
1882          * this case, just ignore the register states and return false to
1883          * indicate that no other reset states need to be processed by the driver.
1884          */
1885         if (hw->reset.level == HNS3_VF_FULL_RESET)
1886                 return false;
1887
1888         /* Check the registers to confirm whether a reset is pending */
1889         hns3vf_check_event_cause(hns, NULL);
1890         reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1891         if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
1892             hw->reset.level < reset) {
1893                 hns3_warn(hw, "High level reset %d is pending", reset);
1894                 return true;
1895         }
1896         return false;
1897 }
1898
1899 static int
1900 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1901 {
1902         struct hns3_hw *hw = &hns->hw;
1903         struct hns3_wait_data *wait_data = hw->reset.wait_data;
1904         struct timeval tv;
1905
1906         if (wait_data->result == HNS3_WAIT_SUCCESS) {
1907                 /*
1908                  * After the VF reset is ready, the PF may not have completed
1909                  * its reset processing. Mailbox messages sent from the VF to
1910                  * the PF may fail during the PF reset, so add an extra delay.
1911                  */
1912                 if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1913                     hw->reset.level == HNS3_FLR_RESET)
1914                         return 0;
1915                 /* In the reset retry process there is no need for an extra delay. */
1916                 if (hw->reset.attempts)
1917                         return 0;
1918                 if (wait_data->check_completion == NULL)
1919                         return 0;
1920
1921                 wait_data->check_completion = NULL;
1922                 wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
1923                 wait_data->count = 1;
1924                 wait_data->result = HNS3_WAIT_REQUEST;
1925                 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1926                                   wait_data);
1927                 hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset to complete");
1928                 return -EAGAIN;
1929         } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1930                 hns3_clock_gettime(&tv);
1931                 hns3_warn(hw, "Reset step4: hardware not ready after reset, time=%ld.%.6ld",
1932                           tv.tv_sec, tv.tv_usec);
1933                 return -ETIME;
1934         } else if (wait_data->result == HNS3_WAIT_REQUEST)
1935                 return -EAGAIN;
1936
1937         wait_data->hns = hns;
1938         wait_data->check_completion = is_vf_reset_done;
1939         wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1940                                 HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1941         wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1942         wait_data->count = HNS3VF_RESET_WAIT_CNT;
1943         wait_data->result = HNS3_WAIT_REQUEST;
1944         rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1945         return -EAGAIN;
1946 }
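
/*
 * Illustrative sketch, not part of the driver: the reset framework keeps
 * calling the function above until the wait completes, so each call either
 * arms a one-shot alarm and reports -EAGAIN or consumes a final state:
 *
 *	switch (wait_data->result) {
 *	case HNS3_WAIT_REQUEST:
 *		return -EAGAIN;		// alarm still polling the hardware
 *	case HNS3_WAIT_SUCCESS:
 *		return 0;		// hardware reports the reset is done
 *	case HNS3_WAIT_TIMEOUT:
 *		return -ETIME;		// polled HNS3VF_RESET_WAIT_CNT times
 *	}
 */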
1947
1948 static int
1949 hns3vf_prepare_reset(struct hns3_adapter *hns)
1950 {
1951         struct hns3_hw *hw = &hns->hw;
1952         int ret;
1953
1954         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1955                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1956                                         0, true, NULL, 0);
1957                 if (ret)
1958                         return ret;
1959         }
1960         __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1961
1962         return 0;
1963 }
1964
1965 static int
1966 hns3vf_stop_service(struct hns3_adapter *hns)
1967 {
1968         struct hns3_hw *hw = &hns->hw;
1969         struct rte_eth_dev *eth_dev;
1970
1971         eth_dev = &rte_eth_devices[hw->data->port_id];
1972         if (hw->adapter_state == HNS3_NIC_STARTED) {
1973                 /*
1974                  * Make sure to update the link status before hns3vf_stop_poll_job,
1975                  * because updating the link status depends on the polling job.
1976                  */
1977                 hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
1978                                           hw->mac.link_duplex);
1979                 hns3vf_stop_poll_job(eth_dev);
1980         }
1981         hw->mac.link_status = RTE_ETH_LINK_DOWN;
1982
1983         hns3_set_rxtx_function(eth_dev);
1984         rte_wmb();
1985         /* Disable datapath on secondary process. */
1986         hns3_mp_req_stop_rxtx(eth_dev);
1987         rte_delay_ms(hw->cfg_max_queues);
1988
1989         rte_spinlock_lock(&hw->lock);
1990         if (hw->adapter_state == HNS3_NIC_STARTED ||
1991             hw->adapter_state == HNS3_NIC_STOPPING) {
1992                 hns3_enable_all_queues(hw, false);
1993                 hns3vf_do_stop(hns);
1994                 hw->reset.mbuf_deferred_free = true;
1995         } else
1996                 hw->reset.mbuf_deferred_free = false;
1997
1998         /*
1999          * It is cumbersome for hardware to pick-and-choose entries for
2000          * deletion from table space. Hence, for a function reset, software
2001          * intervention is required to delete the entries.
2002          */
2003         if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
2004                 hns3_configure_all_mc_mac_addr(hns, true);
2005         rte_spinlock_unlock(&hw->lock);
2006
2007         return 0;
2008 }
2009
2010 static int
2011 hns3vf_start_service(struct hns3_adapter *hns)
2012 {
2013         struct hns3_hw *hw = &hns->hw;
2014         struct rte_eth_dev *eth_dev;
2015
2016         eth_dev = &rte_eth_devices[hw->data->port_id];
2017         hns3_set_rxtx_function(eth_dev);
2018         hns3_mp_req_start_rxtx(eth_dev);
2019         if (hw->adapter_state == HNS3_NIC_STARTED) {
2020                 hns3vf_start_poll_job(eth_dev);
2021
2022                 /* Enable interrupt of all rx queues before enabling queues */
2023                 hns3_dev_all_rx_queue_intr_enable(hw, true);
2024                 /*
2025                  * The enable state of each Rx and Tx queue is recovered after
2026                  * reset, so restore it before enabling all TQPs.
2027                  */
2028                 hns3_restore_tqp_enable_state(hw);
2029                 /*
2030                  * When initialization is finished, enable the queues to
2031                  * receive and transmit packets.
2032                  */
2033                 hns3_enable_all_queues(hw, true);
2034         }
2035
2036         return 0;
2037 }
2038
2039 static int
2040 hns3vf_check_default_mac_change(struct hns3_hw *hw)
2041 {
2042         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2043         struct rte_ether_addr *hw_mac;
2044         int ret;
2045
2046         /*
2047          * The hns3 PF ethdev driver in the kernel supports setting the VF MAC
2048          * address on the host with the "ip link set ..." command. If the hns3
2049          * PF kernel ethdev driver sets the MAC address for a VF device after
2050          * the related VF device has been initialized, the PF driver notifies
2051          * the VF driver to reset the VF device so that the new MAC address
2052          * takes effect immediately. The hns3 VF PMD should check whether the
2053          * MAC address has been changed by the PF kernel ethdev driver; if it
2054          * has, the VF driver should configure the hardware with the new MAC
2055          * address during the hardware configuration recovery stage of reset.
2056          */
2057         ret = hns3vf_get_host_mac_addr(hw);
2058         if (ret)
2059                 return ret;
2060
2061         hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2062         ret = rte_is_zero_ether_addr(hw_mac);
2063         if (ret) {
2064                 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2065         } else {
2066                 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2067                 if (!ret) {
2068                         rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2069                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2070                                               &hw->data->mac_addrs[0]);
2071                         hns3_warn(hw, "Default MAC address has been changed to:"
2072                                   " %s by the host PF kernel ethdev driver",
2073                                   mac_str);
2074                 }
2075         }
2076
2077         return 0;
2078 }
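
/*
 * Illustrative sketch, not part of the driver: the comparison above is
 * built from generic rte_ether helpers, which work the same way in
 * application code:
 *
 *	struct rte_ether_addr cur, prev;
 *	char buf[RTE_ETHER_ADDR_FMT_SIZE];
 *
 *	if (!rte_is_zero_ether_addr(&cur) &&
 *	    !rte_is_same_ether_addr(&cur, &prev)) {
 *		rte_ether_addr_copy(&cur, &prev);		// prev = cur
 *		rte_ether_format_addr(buf, sizeof(buf), &prev);	// "xx:xx:..."
 *	}
 */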
2079
2080 static int
2081 hns3vf_restore_conf(struct hns3_adapter *hns)
2082 {
2083         struct hns3_hw *hw = &hns->hw;
2084         int ret;
2085
2086         ret = hns3vf_check_default_mac_change(hw);
2087         if (ret)
2088                 return ret;
2089
2090         ret = hns3_configure_all_mac_addr(hns, false);
2091         if (ret)
2092                 return ret;
2093
2094         ret = hns3_configure_all_mc_mac_addr(hns, false);
2095         if (ret)
2096                 goto err_mc_mac;
2097
2098         ret = hns3vf_restore_promisc(hns);
2099         if (ret)
2100                 goto err_vlan_table;
2101
2102         ret = hns3vf_restore_vlan_conf(hns);
2103         if (ret)
2104                 goto err_vlan_table;
2105
2106         ret = hns3vf_get_port_base_vlan_filter_state(hw);
2107         if (ret)
2108                 goto err_vlan_table;
2109
2110         ret = hns3_restore_rx_interrupt(hw);
2111         if (ret)
2112                 goto err_vlan_table;
2113
2114         ret = hns3_restore_gro_conf(hw);
2115         if (ret)
2116                 goto err_vlan_table;
2117
2118         if (hw->adapter_state == HNS3_NIC_STARTED) {
2119                 ret = hns3vf_do_start(hns, false);
2120                 if (ret)
2121                         goto err_vlan_table;
2122                 hns3_info(hw, "hns3vf dev restart successful!");
2123         } else if (hw->adapter_state == HNS3_NIC_STOPPING)
2124                 hw->adapter_state = HNS3_NIC_CONFIGURED;
2125
2126         ret = hns3vf_set_alive(hw, true);
2127         if (ret) {
2128                 hns3_err(hw, "failed to send VF alive message to PF: %d", ret);
2129                 goto err_vlan_table;
2130         }
2131
2132         return 0;
2133
2134 err_vlan_table:
2135         hns3_configure_all_mc_mac_addr(hns, true);
2136 err_mc_mac:
2137         hns3_configure_all_mac_addr(hns, true);
2138         return ret;
2139 }
2140
2141 static enum hns3_reset_level
2142 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2143 {
2144         enum hns3_reset_level reset_level;
2145
2146         /* return the highest priority reset level amongst all */
2147         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2148                 reset_level = HNS3_VF_RESET;
2149         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2150                 reset_level = HNS3_VF_FULL_RESET;
2151         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2152                 reset_level = HNS3_VF_PF_FUNC_RESET;
2153         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2154                 reset_level = HNS3_VF_FUNC_RESET;
2155         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2156                 reset_level = HNS3_FLR_RESET;
2157         else
2158                 reset_level = HNS3_NONE_RESET;
2159
2160         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2161                 return HNS3_NONE_RESET;
2162
2163         return reset_level;
2164 }
2165
2166 static void
2167 hns3vf_reset_service(void *param)
2168 {
2169         struct hns3_adapter *hns = (struct hns3_adapter *)param;
2170         struct hns3_hw *hw = &hns->hw;
2171         enum hns3_reset_level reset_level;
2172         struct timeval tv_delta;
2173         struct timeval tv_start;
2174         struct timeval tv;
2175         uint64_t msec;
2176
2177         /*
2178          * If the interrupt was not triggered within the delay time, it may
2179          * have been lost. It is necessary to handle the interrupt here to
2180          * recover from the error.
2181          */
2182         if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2183                             SCHEDULE_DEFERRED) {
2184                 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2185                                  __ATOMIC_RELAXED);
2186                 hns3_err(hw, "Handling interrupts in delayed tasks");
2187                 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2188                 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2189                 if (reset_level == HNS3_NONE_RESET) {
2190                         hns3_err(hw, "No reset level is set, try VF reset");
2191                         hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2192                 }
2193         }
2194         __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2195
2196         /*
2197          * Hardware reset has been notified, we now have to poll & check if
2198          * hardware has actually completed the reset sequence.
2199          */
2200         reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2201         if (reset_level != HNS3_NONE_RESET) {
2202                 hns3_clock_gettime(&tv_start);
2203                 hns3_reset_process(hns, reset_level);
2204                 hns3_clock_gettime(&tv);
2205                 timersub(&tv, &tv_start, &tv_delta);
2206                 msec = hns3_clock_calctime_ms(&tv_delta);
2207                 if (msec > HNS3_RESET_PROCESS_MS)
2208                         hns3_err(hw, "reset level %d handling took a long time: %" PRIu64
2209                                  " ms, time=%ld.%.6ld",
2210                                  hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2211         }
2212 }
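
/*
 * Illustrative sketch, not part of the driver: the duration check above is
 * the classic gettimeofday()/timersub() pattern; a plain <sys/time.h>
 * version, with do_reset_work() as a hypothetical stand-in:
 *
 *	struct timeval start, end, delta;
 *	uint64_t msec;
 *
 *	gettimeofday(&start, NULL);
 *	do_reset_work();
 *	gettimeofday(&end, NULL);
 *	timersub(&end, &start, &delta);
 *	msec = (uint64_t)delta.tv_sec * 1000 + delta.tv_usec / 1000;
 */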
2213
2214 static int
2215 hns3vf_reinit_dev(struct hns3_adapter *hns)
2216 {
2217         struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2218         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2219         struct hns3_hw *hw = &hns->hw;
2220         int ret;
2221
2222         if (hw->reset.level == HNS3_VF_FULL_RESET) {
2223                 rte_intr_disable(pci_dev->intr_handle);
2224                 ret = hns3vf_set_bus_master(pci_dev, true);
2225                 if (ret < 0) {
2226                         hns3_err(hw, "failed to set PCI bus mastering, ret = %d", ret);
2227                         return ret;
2228                 }
2229         }
2230
2231         /* Firmware command initialize */
2232         ret = hns3_cmd_init(hw);
2233         if (ret) {
2234                 hns3_err(hw, "Failed to init cmd: %d", ret);
2235                 return ret;
2236         }
2237
2238         if (hw->reset.level == HNS3_VF_FULL_RESET) {
2239                 /*
2240                  * UIO enables MSI-X by writing the PCIe configuration space,
2241                  * while vfio-pci enables MSI-X in rte_intr_enable.
2242                  */
2243                 if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2244                     pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2245                         if (hns3vf_enable_msix(pci_dev, true))
2246                                 hns3_err(hw, "Failed to enable MSI-X");
2247                 }
2248
2249                 rte_intr_enable(pci_dev->intr_handle);
2250         }
2251
2252         ret = hns3_reset_all_tqps(hns);
2253         if (ret) {
2254                 hns3_err(hw, "Failed to reset all queues: %d", ret);
2255                 return ret;
2256         }
2257
2258         ret = hns3vf_init_hardware(hns);
2259         if (ret) {
2260                 hns3_err(hw, "Failed to init hardware: %d", ret);
2261                 return ret;
2262         }
2263
2264         return 0;
2265 }
2266
2267 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2268         .dev_configure      = hns3vf_dev_configure,
2269         .dev_start          = hns3vf_dev_start,
2270         .dev_stop           = hns3vf_dev_stop,
2271         .dev_close          = hns3vf_dev_close,
2272         .mtu_set            = hns3vf_dev_mtu_set,
2273         .promiscuous_enable = hns3vf_dev_promiscuous_enable,
2274         .promiscuous_disable = hns3vf_dev_promiscuous_disable,
2275         .allmulticast_enable = hns3vf_dev_allmulticast_enable,
2276         .allmulticast_disable = hns3vf_dev_allmulticast_disable,
2277         .stats_get          = hns3_stats_get,
2278         .stats_reset        = hns3_stats_reset,
2279         .xstats_get         = hns3_dev_xstats_get,
2280         .xstats_get_names   = hns3_dev_xstats_get_names,
2281         .xstats_reset       = hns3_dev_xstats_reset,
2282         .xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2283         .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2284         .dev_infos_get      = hns3_dev_infos_get,
2285         .fw_version_get     = hns3_fw_version_get,
2286         .rx_queue_setup     = hns3_rx_queue_setup,
2287         .tx_queue_setup     = hns3_tx_queue_setup,
2288         .rx_queue_release   = hns3_dev_rx_queue_release,
2289         .tx_queue_release   = hns3_dev_tx_queue_release,
2290         .rx_queue_start     = hns3_dev_rx_queue_start,
2291         .rx_queue_stop      = hns3_dev_rx_queue_stop,
2292         .tx_queue_start     = hns3_dev_tx_queue_start,
2293         .tx_queue_stop      = hns3_dev_tx_queue_stop,
2294         .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2295         .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2296         .rxq_info_get       = hns3_rxq_info_get,
2297         .txq_info_get       = hns3_txq_info_get,
2298         .rx_burst_mode_get  = hns3_rx_burst_mode_get,
2299         .tx_burst_mode_get  = hns3_tx_burst_mode_get,
2300         .mac_addr_add       = hns3_add_mac_addr,
2301         .mac_addr_remove    = hns3_remove_mac_addr,
2302         .mac_addr_set       = hns3vf_set_default_mac_addr,
2303         .set_mc_addr_list   = hns3_set_mc_mac_addr_list,
2304         .link_update        = hns3vf_dev_link_update,
2305         .rss_hash_update    = hns3_dev_rss_hash_update,
2306         .rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2307         .reta_update        = hns3_dev_rss_reta_update,
2308         .reta_query         = hns3_dev_rss_reta_query,
2309         .flow_ops_get       = hns3_dev_flow_ops_get,
2310         .vlan_filter_set    = hns3vf_vlan_filter_set,
2311         .vlan_offload_set   = hns3vf_vlan_offload_set,
2312         .get_reg            = hns3_get_regs,
2313         .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2314         .tx_done_cleanup    = hns3_tx_done_cleanup,
2315 };
2316
2317 static const struct hns3_reset_ops hns3vf_reset_ops = {
2318         .reset_service       = hns3vf_reset_service,
2319         .stop_service        = hns3vf_stop_service,
2320         .prepare_reset       = hns3vf_prepare_reset,
2321         .wait_hardware_ready = hns3vf_wait_hardware_ready,
2322         .reinit_dev          = hns3vf_reinit_dev,
2323         .restore_conf        = hns3vf_restore_conf,
2324         .start_service       = hns3vf_start_service,
2325 };
2326
2327 static void
2328 hns3vf_init_hw_ops(struct hns3_hw *hw)
2329 {
2330         hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
2331         hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
2332         hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
2333         hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
2334         hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
2335 }
2336
2337 static int
2338 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2339 {
2340         struct hns3_adapter *hns = eth_dev->data->dev_private;
2341         struct hns3_hw *hw = &hns->hw;
2342         int ret;
2343
2344         PMD_INIT_FUNC_TRACE();
2345
2346         hns3_flow_init(eth_dev);
2347
2348         hns3_set_rxtx_function(eth_dev);
2349         eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2350         eth_dev->rx_queue_count = hns3_rx_queue_count;
2351         ret = hns3_mp_init(eth_dev);
2352         if (ret)
2353                 goto err_mp_init;
2354
2355         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2356                 hns3_tx_push_init(eth_dev);
2357                 return 0;
2358         }
2359
2360         hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2361         hns->is_vf = true;
2362         hw->data = eth_dev->data;
2363         hns3_parse_devargs(eth_dev);
2364
2365         ret = hns3_reset_init(hw);
2366         if (ret)
2367                 goto err_init_reset;
2368         hw->reset.ops = &hns3vf_reset_ops;
2369
2370         hns3vf_init_hw_ops(hw);
2371         ret = hns3vf_init_vf(eth_dev);
2372         if (ret) {
2373                 PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2374                 goto err_init_vf;
2375         }
2376
2377         ret = hns3_init_mac_addrs(eth_dev);
2378         if (ret != 0)
2379                 goto err_init_mac_addrs;
2380
2381         hw->adapter_state = HNS3_NIC_INITIALIZED;
2382
2383         if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2384                             SCHEDULE_PENDING) {
2385                 hns3_err(hw, "Reschedule reset service after dev_init");
2386                 hns3_schedule_reset(hns);
2387         } else {
2388                 /* IMP will wait for the ready flag before reset */
2389                 hns3_notify_reset_ready(hw, false);
2390         }
2391         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2392                           eth_dev);
2393         return 0;
2394
2395 err_init_mac_addrs:
2396         hns3vf_uninit_vf(eth_dev);
2397
2398 err_init_vf:
2399         rte_free(hw->reset.wait_data);
2400
2401 err_init_reset:
2402         hns3_mp_uninit(eth_dev);
2403
2404 err_mp_init:
2405         eth_dev->dev_ops = NULL;
2406         eth_dev->rx_pkt_burst = NULL;
2407         eth_dev->rx_descriptor_status = NULL;
2408         eth_dev->tx_pkt_burst = NULL;
2409         eth_dev->tx_pkt_prepare = NULL;
2410         eth_dev->tx_descriptor_status = NULL;
2411
2412         return ret;
2413 }
2414
2415 static int
2416 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2417 {
2418         struct hns3_adapter *hns = eth_dev->data->dev_private;
2419         struct hns3_hw *hw = &hns->hw;
2420
2421         PMD_INIT_FUNC_TRACE();
2422
2423         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2424                 __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
2425                 hns3_mp_uninit(eth_dev);
2426                 return 0;
2427         }
2428
2429         if (hw->adapter_state < HNS3_NIC_CLOSING)
2430                 hns3vf_dev_close(eth_dev);
2431
2432         hw->adapter_state = HNS3_NIC_REMOVED;
2433         return 0;
2434 }
2435
2436 static int
2437 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2438                      struct rte_pci_device *pci_dev)
2439 {
2440         return rte_eth_dev_pci_generic_probe(pci_dev,
2441                                              sizeof(struct hns3_adapter),
2442                                              hns3vf_dev_init);
2443 }
2444
2445 static int
2446 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2447 {
2448         return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2449 }
2450
2451 static const struct rte_pci_id pci_id_hns3vf_map[] = {
2452         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2453         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2454         { .vendor_id = 0, }, /* sentinel */
2455 };
2456
2457 static struct rte_pci_driver rte_hns3vf_pmd = {
2458         .id_table = pci_id_hns3vf_map,
2459         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2460         .probe = eth_hns3vf_pci_probe,
2461         .remove = eth_hns3vf_pci_remove,
2462 };
2463
2464 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2465 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2466 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
2467 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
2468                 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
2469                 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
2470                 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
2471                 HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
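
/*
 * Illustrative usage, not part of the driver: the devargs registered above
 * are supplied with the PCI allow-list option. Assuming the keys expand to
 * "rx_func_hint", "tx_func_hint", "dev_caps_mask" and "mbx_time_limit_ms",
 * a testpmd invocation might look like (the PCI address is a placeholder):
 *
 *	dpdk-testpmd -a 0000:bd:00.1,rx_func_hint=vec,tx_func_hint=common -- -i
 */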