dpdk.git: drivers/net/hns3/hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <arpa/inet.h>
12 #include <linux/pci_regs.h>
13
14 #include <rte_alarm.h>
15 #include <rte_atomic.h>
16 #include <rte_bus_pci.h>
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_eal.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_ethdev_pci.h>
25 #include <rte_interrupts.h>
26 #include <rte_io.h>
27 #include <rte_log.h>
28 #include <rte_pci.h>
29 #include <rte_vfio.h>
30
31 #include "hns3_ethdev.h"
32 #include "hns3_logs.h"
33 #include "hns3_rxtx.h"
34 #include "hns3_regs.h"
35 #include "hns3_intr.h"
36 #include "hns3_dcb.h"
37 #include "hns3_mp.h"
38
39 #define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
40 #define HNS3VF_SERVICE_INTERVAL         1000000 /* us */
41
42 #define HNS3VF_RESET_WAIT_MS    20
43 #define HNS3VF_RESET_WAIT_CNT   2000
44
45 /* Reset related Registers */
46 #define HNS3_GLOBAL_RESET_BIT           0
47 #define HNS3_CORE_RESET_BIT             1
48 #define HNS3_IMP_RESET_BIT              2
49 #define HNS3_FUN_RST_ING_B              0
50
51 enum hns3vf_evt_cause {
52         HNS3VF_VECTOR0_EVENT_RST,
53         HNS3VF_VECTOR0_EVENT_MBX,
54         HNS3VF_VECTOR0_EVENT_OTHER,
55 };
56
57 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
58                                                     uint64_t *levels);
59 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
60 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
61
62 /* set PCI bus mastering */
63 static void
64 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
65 {
66         uint16_t reg;
67
68         rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
69
70         if (op)
71                 /* set the master bit */
72                 reg |= PCI_COMMAND_MASTER;
73         else
74                 reg &= ~(PCI_COMMAND_MASTER);
75
76         rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
77 }
78
79 /**
80  * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
81  * @cap: the capability to look for
82  *
83  * Return the offset of the capability within the list, or 0 if not found.
84  */
85 static int
86 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
87 {
88 #define MAX_PCIE_CAPABILITY 48
89         uint16_t status;
90         uint8_t pos;
91         uint8_t id;
92         int ttl;
93
94         rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
95         if (!(status & PCI_STATUS_CAP_LIST))
96                 return 0;
97
98         ttl = MAX_PCIE_CAPABILITY;
99         rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
100         while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
101                 rte_pci_read_config(device, &id, sizeof(id),
102                                     (pos + PCI_CAP_LIST_ID));
103
104                 if (id == 0xFF)
105                         break;
106
107                 if (id == cap)
108                         return (int)pos;
109
110                 rte_pci_read_config(device, &pos, sizeof(pos),
111                                     (pos + PCI_CAP_LIST_NEXT));
112         }
113         return 0;
114 }
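/*
 * Background on the walk above: each PCI capability entry in config
 * space is laid out as [cap id (1 byte)][next pointer (1 byte)][data...];
 * the head pointer lives at PCI_CAPABILITY_LIST, and the ttl counter
 * bounds the traversal so a malformed list cannot loop forever.
 */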
115
116 static int
117 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
118 {
119         uint16_t control;
120         int pos;
121
122         pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
123         if (pos) {
124                 rte_pci_read_config(device, &control, sizeof(control),
125                                     (pos + PCI_MSIX_FLAGS));
126                 if (op)
127                         control |= PCI_MSIX_FLAGS_ENABLE;
128                 else
129                         control &= ~PCI_MSIX_FLAGS_ENABLE;
130                 rte_pci_write_config(device, &control, sizeof(control),
131                                      (pos + PCI_MSIX_FLAGS));
132                 return 0;
133         }
134         return -ENXIO;
135 }
136
137 static int
138 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
139                     __attribute__ ((unused)) uint32_t idx,
140                     __attribute__ ((unused)) uint32_t pool)
141 {
142         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
143         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
144         int ret;
145
146         rte_spinlock_lock(&hw->lock);
147         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
148                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
149                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
150         rte_spinlock_unlock(&hw->lock);
151         if (ret) {
152                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
153                                       mac_addr);
154                 hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
155                          ret);
156         }
157
158         return ret;
159 }
160
161 static void
162 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
163 {
164         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
165         /* index will be checked by upper level rte interface */
166         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
167         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
168         int ret;
169
170         rte_spinlock_lock(&hw->lock);
171         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
172                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
173                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
174                                 NULL, 0);
175         rte_spinlock_unlock(&hw->lock);
176         if (ret) {
177                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
178                                       mac_addr);
179                 hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
180                          mac_str, ret);
181         }
182 }
183
184 static int
185 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
186                             struct rte_ether_addr *mac_addr)
187 {
188 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
189         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
190         struct rte_ether_addr *old_addr;
191         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
192         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193         int ret;
194
195         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
196                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
197                                       mac_addr);
198                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
199                          mac_str);
200                 return -EINVAL;
201         }
202
203         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
204         rte_spinlock_lock(&hw->lock);
205         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
206         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
207                RTE_ETHER_ADDR_LEN);
208
209         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
210                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
211                                 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
212         if (ret) {
213                 /*
214                  * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
215                  * driver. When the user has configured a MAC address for the
216                  * VF device with the "ip link set ..." command on the PF
217                  * device, the hns3 PF kernel ethdev driver does not allow the
218                  * VF driver to reconfigure a different default MAC address,
219                  * and returns -EPERM to the VF driver through the mailbox.
220                  */
221                 if (ret == -EPERM) {
222                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
223                                               old_addr);
224                         hns3_warn(hw, "Has permanent mac addr(%s) for vf",
225                                   mac_str);
226                 } else {
227                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
228                                               mac_addr);
229                         hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
230                                  mac_str, ret);
231                 }
232         }
233
234         rte_ether_addr_copy(mac_addr,
235                             (struct rte_ether_addr *)hw->mac.mac_addr);
236         rte_spinlock_unlock(&hw->lock);
237
238         return ret;
239 }
240
241 static int
242 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
243 {
244         struct hns3_hw *hw = &hns->hw;
245         struct rte_ether_addr *addr;
246         enum hns3_mbx_mac_vlan_subcode opcode;
247         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
248         int ret = 0;
249         int i;
250
251         if (del)
252                 opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
253         else
254                 opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
255         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
256                 addr = &hw->data->mac_addrs[i];
257                 if (!rte_is_valid_assigned_ether_addr(addr))
258                         continue;
259                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
260                 hns3_dbg(hw, "%s mac addr: %s", del ? "rm" : "add", mac_str);
261                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
262                                         addr->addr_bytes, RTE_ETHER_ADDR_LEN,
263                                         false, NULL, 0);
264                 if (ret) {
265                         hns3_err(hw, "Failed to %s mac addr for vf: %d",
266                                  del ? "remove" : "add", ret);
267                         break;
268                 }
269         }
270         return ret;
271 }
272
273 static int
274 hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
275                        struct rte_ether_addr *mac_addr)
276 {
277         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
278         struct hns3_hw *hw = &hns->hw;
279         int ret;
280
281         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
282                                 HNS3_MBX_MAC_VLAN_MC_ADD,
283                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
284                                 NULL, 0);
285         if (ret) {
286                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
287                                       mac_addr);
288                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
289                          mac_str, ret);
290                 return ret;
291         }
292
293         return 0;
294 }
295
296 static int
297 hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
298                           struct rte_ether_addr *mac_addr)
299 {
300         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
301         struct hns3_hw *hw = &hns->hw;
302         int ret;
303
304         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
305                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
306                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
307                                 NULL, 0);
308         if (ret) {
309                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
310                                       mac_addr);
311                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
312                          mac_str, ret);
313                 return ret;
314         }
315
316         return 0;
317 }
318
319 static int
320 hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
321                             struct rte_ether_addr *mc_addr_set,
322                             uint32_t nb_mc_addr)
323 {
324         struct hns3_adapter *hns = dev->data->dev_private;
325         struct hns3_hw *hw = &hns->hw;
326         struct rte_ether_addr *addr;
327         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
328         int cur_addr_num;
329         int set_addr_num;
330         int num;
331         int ret;
332         int i;
333
334         if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
335                 hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
336                          "invalid. valid range: 0~%d",
337                          nb_mc_addr, HNS3_MC_MACADDR_NUM);
338                 return -EINVAL;
339         }
340
341         set_addr_num = (int)nb_mc_addr;
342         for (i = 0; i < set_addr_num; i++) {
343                 addr = &mc_addr_set[i];
344                 if (!rte_is_multicast_ether_addr(addr)) {
345                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
346                                               addr);
347                         hns3_err(hw,
348                                  "Failed to set mc mac addr, addr(%s) invalid.",
349                                  mac_str);
350                         return -EINVAL;
351                 }
352         }
353         rte_spinlock_lock(&hw->lock);
354         cur_addr_num = hw->mc_addrs_num;
355         for (i = 0; i < cur_addr_num; i++) {
356                 num = cur_addr_num - i - 1;
357                 addr = &hw->mc_addrs[num];
358                 ret = hns3vf_remove_mc_mac_addr(hns, addr);
359                 if (ret) {
360                         rte_spinlock_unlock(&hw->lock);
361                         return ret;
362                 }
363
364                 hw->mc_addrs_num--;
365         }
366
367         for (i = 0; i < set_addr_num; i++) {
368                 addr = &mc_addr_set[i];
369                 ret = hns3vf_add_mc_mac_addr(hns, addr);
370                 if (ret) {
371                         rte_spinlock_unlock(&hw->lock);
372                         return ret;
373                 }
374
375                 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
376                 hw->mc_addrs_num++;
377         }
378         rte_spinlock_unlock(&hw->lock);
379
380         return 0;
381 }
382
383 static int
384 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
385 {
386         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
387         struct hns3_hw *hw = &hns->hw;
388         struct rte_ether_addr *addr;
389         int err = 0;
390         int ret;
391         int i;
392
393         for (i = 0; i < hw->mc_addrs_num; i++) {
394                 addr = &hw->mc_addrs[i];
395                 if (!rte_is_multicast_ether_addr(addr))
396                         continue;
397                 if (del)
398                         ret = hns3vf_remove_mc_mac_addr(hns, addr);
399                 else
400                         ret = hns3vf_add_mc_mac_addr(hns, addr);
401                 if (ret) {
402                         err = ret;
403                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
404                                               addr);
405                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
406                                  del ? "Remove" : "Restore", mac_str, ret);
407                 }
408         }
409         return err;
410 }
411
412 static int
413 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
414                         bool en_uc_pmc, bool en_mc_pmc)
415 {
416         struct hns3_mbx_vf_to_pf_cmd *req;
417         struct hns3_cmd_desc desc;
418         int ret;
419
420         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
421
422         /*
423          * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
424          * so promiscuous/allmulticast mode in the hns3 VF PMD driver behaves
425          * as follows:
426          * 1. The promiscuous/allmulticast mode can be configured successfully
427          *    only on a trusted VF device; on a non-trusted VF device the
428          *    configuration will fail. The hns3 VF device can be configured as
429          *    a trusted device by the hns3 PF kernel ethdev driver on the host
430          *    with the following command:
431          *      "ip link set <eth num> vf <vf id> trust on"
432          * 2. After promiscuous mode is configured successfully, the hns3 VF
433          *    PMD driver can receive both ingress and outgoing traffic, i.e.
434          *    all ingress packets as well as the packets sent from the PF and
435          *    the other VFs on the same physical port.
436          * 3. Note: because of hardware constraints, the vlan filter is
437          *    enabled by default and cannot be turned off on a VF device, so
438          *    it remains effective even in promiscuous mode. If upper
439          *    applications do not call the rte_eth_dev_vlan_filter API to set
440          *    a vlan on the VF device, the hns3 VF PMD driver cannot receive
441          *    packets with a vlan tag in promiscuous mode.
442          */
443         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
444         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
445         req->msg[1] = en_bc_pmc ? 1 : 0;
446         req->msg[2] = en_uc_pmc ? 1 : 0;
447         req->msg[3] = en_mc_pmc ? 1 : 0;
448
449         ret = hns3_cmd_send(hw, &desc, 1);
450         if (ret)
451                 hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
452
453         return ret;
454 }
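/*
 * A minimal usage sketch of the above from the application side,
 * assuming port_id refers to a trusted hns3 VF as the comment in
 * hns3vf_set_promisc_mode() requires; the ethdev layer routes the call
 * to hns3vf_dev_promiscuous_enable() below.
 */
#if 0	/* illustrative only, not compiled */
	uint16_t port_id = 0;	/* assumed: a trusted hns3 VF port */
	int ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0)
		printf("enable promiscuous failed: %d\n", ret);
#endif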
455
456 static int
457 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
458 {
459         struct hns3_adapter *hns = dev->data->dev_private;
460         struct hns3_hw *hw = &hns->hw;
461         int ret;
462
463         ret = hns3vf_set_promisc_mode(hw, true, true, true);
464         if (ret)
465                 hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
466                         ret);
467         return ret;
468 }
469
470 static int
471 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
472 {
473         bool allmulti = dev->data->all_multicast ? true : false;
474         struct hns3_adapter *hns = dev->data->dev_private;
475         struct hns3_hw *hw = &hns->hw;
476         int ret;
477
478         ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
479         if (ret)
480                 hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
481                         ret);
482         return ret;
483 }
484
485 static int
486 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
487 {
488         struct hns3_adapter *hns = dev->data->dev_private;
489         struct hns3_hw *hw = &hns->hw;
490         int ret;
491
492         if (dev->data->promiscuous)
493                 return 0;
494
495         ret = hns3vf_set_promisc_mode(hw, true, false, true);
496         if (ret)
497                 hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
498                         ret);
499         return ret;
500 }
501
502 static int
503 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
504 {
505         struct hns3_adapter *hns = dev->data->dev_private;
506         struct hns3_hw *hw = &hns->hw;
507         int ret;
508
509         if (dev->data->promiscuous)
510                 return 0;
511
512         ret = hns3vf_set_promisc_mode(hw, true, false, false);
513         if (ret)
514                 hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
515                         ret);
516         return ret;
517 }
518
519 static int
520 hns3vf_restore_promisc(struct hns3_adapter *hns)
521 {
522         struct hns3_hw *hw = &hns->hw;
523         bool allmulti = hw->data->all_multicast ? true : false;
524
525         if (hw->data->promiscuous)
526                 return hns3vf_set_promisc_mode(hw, true, true, true);
527
528         return hns3vf_set_promisc_mode(hw, true, false, allmulti);
529 }
530
531 static int
532 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
533                              bool mmap, enum hns3_ring_type queue_type,
534                              uint16_t queue_id)
535 {
536         struct hns3_vf_bind_vector_msg bind_msg;
537         const char *op_str;
538         uint16_t code;
539         int ret;
540
541         memset(&bind_msg, 0, sizeof(bind_msg));
542         code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
543                 HNS3_MBX_UNMAP_RING_TO_VECTOR;
544         bind_msg.vector_id = vector_id;
545
546         if (queue_type == HNS3_RING_TYPE_RX)
547                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
548         else
549                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
550
551         bind_msg.param[0].ring_type = queue_type;
552         bind_msg.ring_num = 1;
553         bind_msg.param[0].tqp_index = queue_id;
554         op_str = mmap ? "Map" : "Unmap";
555         ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
556                                 sizeof(bind_msg), false, NULL, 0);
557         if (ret) {
558                 hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
559                          op_str, queue_id, bind_msg.vector_id, ret);
560                 return ret;
561         }
562
563         return 0;
564 }
565
566 static int
567 hns3vf_init_ring_with_vector(struct hns3_hw *hw)
568 {
569         uint8_t vec;
570         int ret;
571         int i;
572
573         /*
574          * In the hns3 network engine, vector 0 is always the misc interrupt
575          * of this function, and vectors 1~N can be used for the queues of
576          * the function. Tx and Rx queues with the same number share one
577          * interrupt vector. During initialization, all hardware mapping
578          * relationships between queues and interrupt vectors must be
579          * cleared, so that errors caused by residual configurations, such
580          * as unexpected Tx interrupts, can be avoided. Because of the
581          * constraints in the hns3 hardware engine, we have to clear the
582          * mapping relationships by binding all queues to the last interrupt
583          * vector and reserving that last interrupt vector. This method
584          * results in a decrease of the maximum queues when upper
585          * applications call the rte_eth_dev_configure API function to enable
586          * Rx interrupt.
587          */
588         vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
589         hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
590         for (i = 0; i < hw->intr_tqps_num; i++) {
591                 /*
592                  * Set gap limiter and rate limiter configuration of queue's
593                  * interrupt.
594                  */
595                 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
596                                        HNS3_TQP_INTR_GL_DEFAULT);
597                 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
598                                        HNS3_TQP_INTR_GL_DEFAULT);
599                 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
600
601                 ret = hns3vf_bind_ring_with_vector(hw, vec, false,
602                                                    HNS3_RING_TYPE_TX, i);
603                 if (ret) {
604                         PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
605                                           "vector: %d, ret=%d", i, vec, ret);
606                         return ret;
607                 }
608
609                 ret = hns3vf_bind_ring_with_vector(hw, vec, false,
610                                                    HNS3_RING_TYPE_RX, i);
611                 if (ret) {
612                         PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
613                                           "vector: %d, ret=%d", i, vec, ret);
614                         return ret;
615                 }
616         }
617
618         return 0;
619 }
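/*
 * A minimal sketch of how an application opts into Rx interrupts,
 * which is when the reserved last vector described above reduces the
 * usable queue count; port_id, nb_rxq, nb_txq and ret are assumed.
 */
#if 0	/* illustrative only, not compiled */
	struct rte_eth_conf conf = {0};

	conf.intr_conf.rxq = 1;	/* lowers max_rx_queues, see above */
	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
#endif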
620
621 static int
622 hns3vf_dev_configure(struct rte_eth_dev *dev)
623 {
624         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
625         struct hns3_rss_conf *rss_cfg = &hw->rss_info;
626         struct rte_eth_conf *conf = &dev->data->dev_conf;
627         enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
628         uint16_t nb_rx_q = dev->data->nb_rx_queues;
629         uint16_t nb_tx_q = dev->data->nb_tx_queues;
630         struct rte_eth_rss_conf rss_conf;
631         uint16_t mtu;
632         int ret;
633
634         /*
635          * The hardware does not support individually enabling/disabling/
636          * resetting a Tx or Rx queue in the hns3 network engine; the driver
637          * must enable/disable/reset Tx and Rx queues at the same time. When
638          * the number of Tx queues allocated by upper applications is not
639          * equal to the number of Rx queues, the driver sets up fake Tx or Rx
640          * queues to adjust the Tx/Rx queue counts; otherwise, the network
641          * engine cannot work as usual. These fake queues are imperceptible
642          * and cannot be used by upper applications.
643          */
644         ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
645         if (ret) {
646                 hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
647                 return ret;
648         }
649
650         hw->adapter_state = HNS3_NIC_CONFIGURING;
651         if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
652                 hns3_err(hw, "setting link speed/duplex not supported");
653                 ret = -EINVAL;
654                 goto cfg_err;
655         }
656
657         /* When RSS is enabled, configure the RSS hash key and hash types. */
658         if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
659                 rss_conf = conf->rx_adv_conf.rss_conf;
660                 if (rss_conf.rss_key == NULL) {
661                         rss_conf.rss_key = rss_cfg->key;
662                         rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
663                 }
664
665                 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
666                 if (ret)
667                         goto cfg_err;
668         }
669
670         /*
671          * If jumbo frames are enabled, MTU needs to be refreshed
672          * according to the maximum RX packet length.
673          */
674         if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
675                 /*
676                  * The validity of max_rx_pkt_len is guaranteed by the DPDK
677                  * framework. Its maximum value is HNS3_MAX_FRAME_LEN, so it
678                  * can safely be assigned to a "uint16_t" variable.
679                  */
680                 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
681                 ret = hns3vf_dev_mtu_set(dev, mtu);
682                 if (ret)
683                         goto cfg_err;
684                 dev->data->mtu = mtu;
685         }
686
687         ret = hns3vf_dev_configure_vlan(dev);
688         if (ret)
689                 goto cfg_err;
690
691         hw->adapter_state = HNS3_NIC_CONFIGURED;
692         return 0;
693
694 cfg_err:
695         (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
696         hw->adapter_state = HNS3_NIC_INITIALIZED;
697
698         return ret;
699 }
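/*
 * A minimal sketch of the unequal-queue case the comment in
 * hns3vf_dev_configure() describes; port_id, conf and ret are assumed.
 */
#if 0	/* illustrative only, not compiled */
	/* 4 Rx queues but 2 Tx queues: 2 fake Tx queues are set up. */
	ret = rte_eth_dev_configure(port_id, 4, 2, &conf);
#endif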
700
701 static int
702 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
703 {
704         int ret;
705
706         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
707                                 sizeof(mtu), true, NULL, 0);
708         if (ret)
709                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
710
711         return ret;
712 }
713
714 static int
715 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
716 {
717         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
718         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
719         int ret;
720
721         /*
722          * The hns3 PF/VF devices on the same port share the hardware MTU
723          * configuration. Currently, the hns3 VF PMD driver sends a mailbox
724          * message asking the hns3 PF kernel ethdev driver to carry out the
725          * hardware MTU configuration, so there is no need to stop the port
726          * for the hns3 VF device, but the MTU value issued by the hns3 VF
727          * PMD driver must be less than or equal to the PF's MTU.
728          */
729         if (rte_atomic16_read(&hw->reset.resetting)) {
730                 hns3_err(hw, "Failed to set mtu during resetting");
731                 return -EIO;
732         }
733
734         rte_spinlock_lock(&hw->lock);
735         ret = hns3vf_config_mtu(hw, mtu);
736         if (ret) {
737                 rte_spinlock_unlock(&hw->lock);
738                 return ret;
739         }
740         if (frame_size > RTE_ETHER_MAX_LEN)
741                 dev->data->dev_conf.rxmode.offloads |=
742                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
743         else
744                 dev->data->dev_conf.rxmode.offloads &=
745                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
746         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
747         rte_spinlock_unlock(&hw->lock);
748
749         return 0;
750 }
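/*
 * Worked example for the jumbo-frame switch above: frame_size is
 * mtu + HNS3_ETH_OVERHEAD and is compared against RTE_ETHER_MAX_LEN
 * (1518 bytes), so a jumbo MTU such as 9000 always turns
 * DEV_RX_OFFLOAD_JUMBO_FRAME on and raises max_rx_pkt_len, while an
 * MTU small enough to keep frame_size at or below 1518 clears it.
 */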
751
752 static int
753 hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
754 {
755         struct hns3_adapter *hns = eth_dev->data->dev_private;
756         struct hns3_hw *hw = &hns->hw;
757         uint16_t q_num = hw->tqps_num;
758
759         /*
760          * In interrupt mode, 'max_rx_queues' is set based on the number of
761          * MSI-X interrupt resources of the hardware.
762          */
763         if (hw->data->dev_conf.intr_conf.rxq == 1)
764                 q_num = hw->intr_tqps_num;
765
766         info->max_rx_queues = q_num;
767         info->max_tx_queues = hw->tqps_num;
768         info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
769         info->min_rx_bufsize = hw->rx_buf_len;
770         info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
771         info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
772
773         info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
774                                  DEV_RX_OFFLOAD_UDP_CKSUM |
775                                  DEV_RX_OFFLOAD_TCP_CKSUM |
776                                  DEV_RX_OFFLOAD_SCTP_CKSUM |
777                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
778                                  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
779                                  DEV_RX_OFFLOAD_KEEP_CRC |
780                                  DEV_RX_OFFLOAD_SCATTER |
781                                  DEV_RX_OFFLOAD_VLAN_STRIP |
782                                  DEV_RX_OFFLOAD_QINQ_STRIP |
783                                  DEV_RX_OFFLOAD_VLAN_FILTER |
784                                  DEV_RX_OFFLOAD_JUMBO_FRAME);
785         info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
786         info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
787                                  DEV_TX_OFFLOAD_IPV4_CKSUM |
788                                  DEV_TX_OFFLOAD_TCP_CKSUM |
789                                  DEV_TX_OFFLOAD_UDP_CKSUM |
790                                  DEV_TX_OFFLOAD_SCTP_CKSUM |
791                                  DEV_TX_OFFLOAD_VLAN_INSERT |
792                                  DEV_TX_OFFLOAD_QINQ_INSERT |
793                                  DEV_TX_OFFLOAD_MULTI_SEGS |
794                                  DEV_TX_OFFLOAD_TCP_TSO |
795                                  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
796                                  DEV_TX_OFFLOAD_GRE_TNL_TSO |
797                                  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
798                                  info->tx_queue_offload_capa);
799
800         info->rx_desc_lim = (struct rte_eth_desc_lim) {
801                 .nb_max = HNS3_MAX_RING_DESC,
802                 .nb_min = HNS3_MIN_RING_DESC,
803                 .nb_align = HNS3_ALIGN_RING_DESC,
804         };
805
806         info->tx_desc_lim = (struct rte_eth_desc_lim) {
807                 .nb_max = HNS3_MAX_RING_DESC,
808                 .nb_min = HNS3_MIN_RING_DESC,
809                 .nb_align = HNS3_ALIGN_RING_DESC,
810         };
811
812         info->vmdq_queue_num = 0;
813
814         info->reta_size = HNS3_RSS_IND_TBL_SIZE;
815         info->hash_key_size = HNS3_RSS_KEY_SIZE;
816         info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
817         info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
818         info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
819
820         return 0;
821 }
822
823 static void
824 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
825 {
826         hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
827 }
828
829 static void
830 hns3vf_disable_irq0(struct hns3_hw *hw)
831 {
832         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
833 }
834
835 static void
836 hns3vf_enable_irq0(struct hns3_hw *hw)
837 {
838         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
839 }
840
841 static enum hns3vf_evt_cause
842 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
843 {
844         struct hns3_hw *hw = &hns->hw;
845         enum hns3vf_evt_cause ret;
846         uint32_t cmdq_stat_reg;
847         uint32_t rst_ing_reg;
848         uint32_t val;
849
850         /* Fetch the events from their corresponding regs */
851         cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
852
853         if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
854                 rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
855                 hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
856                 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
857                 rte_atomic16_set(&hw->reset.disable_cmd, 1);
858                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
859                 hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
860                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
861                 if (clearval) {
862                         hw->reset.stats.global_cnt++;
863                         hns3_warn(hw, "Global reset detected, clear reset status");
864                 } else {
865                         hns3_schedule_delayed_reset(hns);
866                         hns3_warn(hw, "Global reset detected, don't clear reset status");
867                 }
868
869                 ret = HNS3VF_VECTOR0_EVENT_RST;
870                 goto out;
871         }
872
873         /* Check for vector0 mailbox(=CMDQ RX) event source */
874         if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
875                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
876                 ret = HNS3VF_VECTOR0_EVENT_MBX;
877                 goto out;
878         }
879
880         val = 0;
881         ret = HNS3VF_VECTOR0_EVENT_OTHER;
882 out:
883         if (clearval)
884                 *clearval = val;
885         return ret;
886 }
887
888 static void
889 hns3vf_interrupt_handler(void *param)
890 {
891         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
892         struct hns3_adapter *hns = dev->data->dev_private;
893         struct hns3_hw *hw = &hns->hw;
894         enum hns3vf_evt_cause event_cause;
895         uint32_t clearval;
896
897         if (hw->irq_thread_id == 0)
898                 hw->irq_thread_id = pthread_self();
899
900         /* Disable interrupt */
901         hns3vf_disable_irq0(hw);
902
903         /* Read out interrupt causes */
904         event_cause = hns3vf_check_event_cause(hns, &clearval);
905
906         switch (event_cause) {
907         case HNS3VF_VECTOR0_EVENT_RST:
908                 hns3_schedule_reset(hns);
909                 break;
910         case HNS3VF_VECTOR0_EVENT_MBX:
911                 hns3_dev_handle_mbx_msg(hw);
912                 break;
913         default:
914                 break;
915         }
916
917         /* Clear interrupt causes */
918         hns3vf_clear_event_cause(hw, clearval);
919
920         /* Enable interrupt */
921         hns3vf_enable_irq0(hw);
922 }
923
924 static int
925 hns3vf_check_tqp_info(struct hns3_hw *hw)
926 {
927         uint16_t tqps_num;
928
929         tqps_num = hw->tqps_num;
930         if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
931                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
932                                   "range: 1~%d",
933                              tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
934                 return -EINVAL;
935         }
936
937         if (hw->rx_buf_len == 0)
938                 hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
939         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
940
941         return 0;
942 }
943
944 static int
945 hns3vf_get_queue_info(struct hns3_hw *hw)
946 {
947 #define HNS3VF_TQPS_RSS_INFO_LEN        6
948         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
949         int ret;
950
951         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
952                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
953         if (ret) {
954                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
955                 return ret;
956         }
957
958         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
959         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
960         memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
961
962         return hns3vf_check_tqp_info(hw);
963 }
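/*
 * Layout of resp_msg above, as implied by the copies: bytes 0-1 hold
 * tqps_num, bytes 2-3 rss_size_max and bytes 4-5 rx_buf_len, each a
 * 16-bit value reported by the PF.
 */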
964
965 static int
966 hns3vf_get_queue_depth(struct hns3_hw *hw)
967 {
968 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
969         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
970         int ret;
971
972         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
973                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
974         if (ret) {
975                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
976                              ret);
977                 return ret;
978         }
979
980         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
981         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
982
983         return 0;
984 }
985
986 static int
987 hns3vf_get_tc_info(struct hns3_hw *hw)
988 {
989         uint8_t resp_msg;
990         int ret;
991
992         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
993                                 true, &resp_msg, sizeof(resp_msg));
994         if (ret) {
995                 hns3_err(hw, "VF request to get TC info from PF failed %d",
996                          ret);
997                 return ret;
998         }
999
1000         hw->hw_tc_map = resp_msg;
1001
1002         return 0;
1003 }
1004
1005 static int
1006 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
1007 {
1008         uint8_t host_mac[RTE_ETHER_ADDR_LEN];
1009         int ret;
1010
1011         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
1012                                 true, host_mac, RTE_ETHER_ADDR_LEN);
1013         if (ret) {
1014                 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
1015                 return ret;
1016         }
1017
1018         memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
1019
1020         return 0;
1021 }
1022
1023 static int
1024 hns3vf_get_configuration(struct hns3_hw *hw)
1025 {
1026         int ret;
1027
1028         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1029
1030         /* Get queue configuration from PF */
1031         ret = hns3vf_get_queue_info(hw);
1032         if (ret)
1033                 return ret;
1034
1035         /* Get queue depth info from PF */
1036         ret = hns3vf_get_queue_depth(hw);
1037         if (ret)
1038                 return ret;
1039
1040         /* Get user defined VF MAC addr from PF */
1041         ret = hns3vf_get_host_mac_addr(hw);
1042         if (ret)
1043                 return ret;
1044
1045         /* Get tc configuration from PF */
1046         return hns3vf_get_tc_info(hw);
1047 }
1048
1049 static int
1050 hns3vf_set_tc_info(struct hns3_adapter *hns)
1051 {
1052         struct hns3_hw *hw = &hns->hw;
1053         uint16_t nb_rx_q = hw->data->nb_rx_queues;
1054         uint16_t nb_tx_q = hw->data->nb_tx_queues;
1055         uint8_t i;
1056
1057         hw->num_tc = 0;
1058         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
1059                 if (hw->hw_tc_map & BIT(i))
1060                         hw->num_tc++;
1061
1062         if (nb_rx_q < hw->num_tc) {
1063                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
1064                          nb_rx_q, hw->num_tc);
1065                 return -EINVAL;
1066         }
1067
1068         if (nb_tx_q < hw->num_tc) {
1069                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
1070                          nb_tx_q, hw->num_tc);
1071                 return -EINVAL;
1072         }
1073
1074         hns3_set_rss_size(hw, nb_rx_q);
1075         hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
1076
1077         return 0;
1078 }
1079
1080 static void
1081 hns3vf_request_link_info(struct hns3_hw *hw)
1082 {
1083         uint8_t resp_msg;
1084         int ret;
1085
1086         if (rte_atomic16_read(&hw->reset.resetting))
1087                 return;
1088         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1089                                 &resp_msg, sizeof(resp_msg));
1090         if (ret)
1091                 hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
1092 }
1093
1094 static int
1095 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1096 {
1097 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1098         struct hns3_hw *hw = &hns->hw;
1099         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1100         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1101         uint8_t is_kill = on ? 0 : 1;
1102
1103         msg_data[0] = is_kill;
1104         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1105         memcpy(&msg_data[3], &proto, sizeof(proto));
1106
1107         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1108                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1109                                  0);
1110 }
1111
1112 static int
1113 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1114 {
1115         struct hns3_adapter *hns = dev->data->dev_private;
1116         struct hns3_hw *hw = &hns->hw;
1117         int ret;
1118
1119         if (rte_atomic16_read(&hw->reset.resetting)) {
1120                 hns3_err(hw,
1121                          "vf set vlan id failed during resetting, vlan_id =%u",
1122                          vlan_id);
1123                 return -EIO;
1124         }
1125         rte_spinlock_lock(&hw->lock);
1126         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1127         rte_spinlock_unlock(&hw->lock);
1128         if (ret)
1129                 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1130                          vlan_id, ret);
1131
1132         return ret;
1133 }
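/*
 * A minimal usage sketch, assuming port_id refers to this VF: upper
 * applications reach hns3vf_vlan_filter_set() through the generic API
 * below, which is also the call the promiscuous-mode comment in
 * hns3vf_set_promisc_mode() refers to.
 */
#if 0	/* illustrative only, not compiled */
	ret = rte_eth_dev_vlan_filter(port_id, 100 /* vlan_id */, 1 /* on */);
#endif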
1134
1135 static int
1136 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1137 {
1138         uint8_t msg_data;
1139         int ret;
1140
1141         msg_data = enable ? 1 : 0;
1142         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1143                                 &msg_data, sizeof(msg_data), false, NULL, 0);
1144         if (ret)
1145                 hns3_err(hw, "vf set rx vlan strip failed, ret =%d", ret);
1146
1147         return ret;
1148 }
1149
1150 static int
1151 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1152 {
1153         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1154         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1155         unsigned int tmp_mask;
1156
1157         tmp_mask = (unsigned int)mask;
1158         /* Vlan stripping setting */
1159         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
1160                 rte_spinlock_lock(&hw->lock);
1161                 /* Enable or disable VLAN stripping */
1162                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1163                         hns3vf_en_hw_strip_rxvtag(hw, true);
1164                 else
1165                         hns3vf_en_hw_strip_rxvtag(hw, false);
1166                 rte_spinlock_unlock(&hw->lock);
1167         }
1168
1169         return 0;
1170 }
1171
1172 static int
1173 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1174 {
1175         struct rte_vlan_filter_conf *vfc;
1176         struct hns3_hw *hw = &hns->hw;
1177         uint16_t vlan_id;
1178         uint64_t vbit;
1179         uint64_t ids;
1180         int ret = 0;
1181         uint32_t i;
1182
1183         vfc = &hw->data->vlan_filter_conf;
1184         for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1185                 if (vfc->ids[i] == 0)
1186                         continue;
1187                 ids = vfc->ids[i];
1188                 while (ids) {
1189                         /*
1190                          * ids is 64 bits wide; each bit corresponds
1191                          * to one vlan id.
1192                          */
1193                         vlan_id = 64 * i;
1194                         /* count trailing zeroes */
1195                         vbit = ~ids & (ids - 1);
1196                         /* clear least significant bit set */
1197                         ids ^= (ids ^ (ids - 1)) ^ vbit;
1198                         for (; vbit;) {
1199                                 vbit >>= 1;
1200                                 vlan_id++;
1201                         }
1202                         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1203                         if (ret) {
1204                                 hns3_err(hw,
1205                                          "VF handle vlan table failed, ret =%d, on = %d",
1206                                          ret, on);
1207                                 return ret;
1208                         }
1209                 }
1210         }
1211
1212         return ret;
1213 }
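/*
 * Worked example of the bit tricks above, for word i = 0 with
 * ids = 0b0110:
 *   vbit = ~ids & (ids - 1)          = 0b0001 (bits below lowest set bit)
 *   ids ^ (ids - 1)                  = 0b0011 (lowest set bit and below)
 *   ids ^= (ids ^ (ids - 1)) ^ vbit  -> ids = 0b0100 (lowest bit cleared)
 * Shifting vbit right until it reaches zero then advances vlan_id from
 * 64 * i = 0 to 1, the index of the bit that was just cleared.
 */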
1214
1215 static int
1216 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1217 {
1218         return hns3vf_handle_all_vlan_table(hns, 0);
1219 }
1220
1221 static int
1222 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1223 {
1224         struct hns3_hw *hw = &hns->hw;
1225         struct rte_eth_conf *dev_conf;
1226         bool en;
1227         int ret;
1228
1229         dev_conf = &hw->data->dev_conf;
1230         en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
1231                                                                    : false;
1232         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1233         if (ret)
1234                 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1235                          ret);
1236         return ret;
1237 }
1238
1239 static int
1240 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1241 {
1242         struct hns3_adapter *hns = dev->data->dev_private;
1243         struct rte_eth_dev_data *data = dev->data;
1244         struct hns3_hw *hw = &hns->hw;
1245         int ret;
1246
1247         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1248             data->dev_conf.txmode.hw_vlan_reject_untagged ||
1249             data->dev_conf.txmode.hw_vlan_insert_pvid) {
1250                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1251                               "or hw_vlan_insert_pvid is not supported!");
1252         }
1253
1254         /* Apply vlan offload setting */
1255         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1256         if (ret)
1257                 hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);
1258
1259         return ret;
1260 }
1261
1262 static int
1263 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1264 {
1265         uint8_t msg_data;
1266
1267         msg_data = alive ? 1 : 0;
1268         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1269                                  sizeof(msg_data), false, NULL, 0);
1270 }
1271
1272 static void
1273 hns3vf_keep_alive_handler(void *param)
1274 {
1275         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1276         struct hns3_adapter *hns = eth_dev->data->dev_private;
1277         struct hns3_hw *hw = &hns->hw;
1278         uint8_t respmsg;
1279         int ret;
1280
1281         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1282                                 false, &respmsg, sizeof(uint8_t));
1283         if (ret)
1284                 hns3_err(hw, "VF failed to send keep-alive cmd: %d",
1285                          ret);
1286
1287         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1288                           eth_dev);
1289 }
1290
1291 static void
1292 hns3vf_service_handler(void *param)
1293 {
1294         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1295         struct hns3_adapter *hns = eth_dev->data->dev_private;
1296         struct hns3_hw *hw = &hns->hw;
1297
1298         /*
1299          * The link status query and reset processing are both executed in
1300          * the interrupt thread. When an IMP reset occurs, the IMP does not
1301          * respond and the query operation times out after 30ms. With
1302          * multiple PF/VFs, each query-failure timeout can keep the IMP
1303          * reset interrupt from being serviced within 100ms. So, before
1304          * querying the link status, check whether a reset is pending and,
1305          * if so, abandon the query.
1306          */
1307         if (!hns3vf_is_reset_pending(hns))
1308                 hns3vf_request_link_info(hw);
1309         else
1310                 hns3_warn(hw, "Cancel the query when reset is pending");
1311
1312         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1313                           eth_dev);
1314 }
1315
1316 static int
1317 hns3_query_vf_resource(struct hns3_hw *hw)
1318 {
1319         struct hns3_vf_res_cmd *req;
1320         struct hns3_cmd_desc desc;
1321         uint16_t num_msi;
1322         int ret;
1323
1324         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1325         ret = hns3_cmd_send(hw, &desc, 1);
1326         if (ret) {
1327                 hns3_err(hw, "query vf resource failed, ret = %d", ret);
1328                 return ret;
1329         }
1330
1331         req = (struct hns3_vf_res_cmd *)desc.data;
1332         num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1333                                  HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
1334         if (num_msi < HNS3_MIN_VECTOR_NUM) {
1335                 hns3_err(hw, "Only %u msi resources, not enough for vf(min:%d)",
1336                          num_msi, HNS3_MIN_VECTOR_NUM);
1337                 return -EINVAL;
1338         }
1339
1340         hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
1341
1342         return 0;
1343 }
1344
1345 static int
1346 hns3vf_init_hardware(struct hns3_adapter *hns)
1347 {
1348         struct hns3_hw *hw = &hns->hw;
1349         uint16_t mtu = hw->data->mtu;
1350         int ret;
1351
1352         ret = hns3vf_set_promisc_mode(hw, true, false, false);
1353         if (ret)
1354                 return ret;
1355
1356         ret = hns3vf_config_mtu(hw, mtu);
1357         if (ret)
1358                 goto err_init_hardware;
1359
1360         ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1361         if (ret) {
1362                 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1363                 goto err_init_hardware;
1364         }
1365
1366         ret = hns3_config_gro(hw, false);
1367         if (ret) {
1368                 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1369                 goto err_init_hardware;
1370         }
1371
1372         ret = hns3vf_set_alive(hw, true);
1373         if (ret) {
1374                 PMD_INIT_LOG(ERR, "VF failed to send alive to PF: %d", ret);
1375                 goto err_init_hardware;
1376         }
1377
1378         hns3vf_request_link_info(hw);
1379         return 0;
1380
1381 err_init_hardware:
1382         (void)hns3vf_set_promisc_mode(hw, false, false, false);
1383         return ret;
1384 }
1385
1386 static int
1387 hns3vf_clear_vport_list(struct hns3_hw *hw)
1388 {
1389         return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1390                                  HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1391                                  NULL, 0);
1392 }
1393
1394 static int
1395 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1396 {
1397         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1398         struct hns3_adapter *hns = eth_dev->data->dev_private;
1399         struct hns3_hw *hw = &hns->hw;
1400         int ret;
1401
1402         PMD_INIT_FUNC_TRACE();
1403
1404         /* Get hardware io base address from pcie BAR2 IO space */
1405         hw->io_base = pci_dev->mem_resource[2].addr;
1406
1407         /* Firmware command queue initialize */
1408         ret = hns3_cmd_init_queue(hw);
1409         if (ret) {
1410                 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1411                 goto err_cmd_init_queue;
1412         }
1413
1414         /* Firmware command initialize */
1415         ret = hns3_cmd_init(hw);
1416         if (ret) {
1417                 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1418                 goto err_cmd_init;
1419         }
1420
1421         /* Get VF resource */
1422         ret = hns3_query_vf_resource(hw);
1423         if (ret)
1424                 goto err_cmd_init;
1425
1426         rte_spinlock_init(&hw->mbx_resp.lock);
1427
1428         hns3vf_clear_event_cause(hw, 0);
1429
1430         ret = rte_intr_callback_register(&pci_dev->intr_handle,
1431                                          hns3vf_interrupt_handler, eth_dev);
1432         if (ret) {
1433                 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1434                 goto err_intr_callback_register;
1435         }
1436
1437         /* Enable interrupt */
1438         rte_intr_enable(&pci_dev->intr_handle);
1439         hns3vf_enable_irq0(hw);
1440
1441         /* Get configuration from PF */
1442         ret = hns3vf_get_configuration(hw);
1443         if (ret) {
1444                 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1445                 goto err_get_config;
1446         }
1447
1448         /*
1449          * The hns3 PF ethdev driver in the kernel supports setting the VF
1450          * MAC address on the host with the "ip link set ..." command. To
1451          * avoid incorrect scenarios, for example, the hns3 VF PMD driver
1452          * failing to receive and send packets after the user configures the
1453          * MAC address with "ip link set ...", the hns3 VF PMD driver keeps
1454          * the same MAC address strategy as the hns3 kernel ethdev driver
1455          * during initialization. If the user configures a MAC address for
1456          * the VF device with the ip command, the hns3 VF PMD driver starts
1457          * with it; otherwise it starts with a random MAC address.
1458          */
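        /*
         * For example, the host-side command for that case takes the
         * form (all values are placeholders):
         *   ip link set <pf ethdev> vf <vf id> mac <mac addr>
         */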
1459         ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
1460         if (ret)
1461                 rte_eth_random_addr(hw->mac.mac_addr);
1462
1463         ret = hns3vf_clear_vport_list(hw);
1464         if (ret) {
1465                 PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1466                 goto err_get_config;
1467         }
1468
1469         ret = hns3vf_init_hardware(hns);
1470         if (ret)
1471                 goto err_get_config;
1472
1473         hns3_set_default_rss_args(hw);
1474
1475         /*
1476          * During initialization, all hardware mappings between queues and
1477          * interrupt vectors must be cleared, so that errors caused by
1478          * residual configurations, such as unexpected interrupts, can be
1479          * avoided.
1480          */
1481         ret = hns3vf_init_ring_with_vector(hw);
1482         if (ret)
1483                 goto err_get_config;
1484
1485         return 0;
1486
1487 err_get_config:
1488         hns3vf_disable_irq0(hw);
1489         rte_intr_disable(&pci_dev->intr_handle);
1490         hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
1491                              eth_dev);
1492 err_intr_callback_register:
1493         hns3_cmd_uninit(hw);
1494
1495 err_cmd_init:
1496         hns3_cmd_destroy_queue(hw);
1497
1498 err_cmd_init_queue:
1499         hw->io_base = NULL;
1500
1501         return ret;
1502 }
1503
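     /*
      * Undo hns3vf_init_vf(): tell the PF this VF is no longer alive,
      * disable and unregister the misc interrupt, and release the command
      * queue and BAR resources.
      */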
1504 static void
1505 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1506 {
1507         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1508         struct hns3_adapter *hns = eth_dev->data->dev_private;
1509         struct hns3_hw *hw = &hns->hw;
1510
1511         PMD_INIT_FUNC_TRACE();
1512
1513         hns3_rss_uninit(hns);
1514         (void)hns3vf_set_alive(hw, false);
1515         (void)hns3vf_set_promisc_mode(hw, false, false, false);
1516         hns3vf_disable_irq0(hw);
1517         rte_intr_disable(&pci_dev->intr_handle);
1518         hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
1519                              eth_dev);
1520         hns3_cmd_uninit(hw);
1521         hns3_cmd_destroy_queue(hw);
1522         hw->io_base = NULL;
1523 }
1524
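     /*
      * Stop the datapath: mark the link down and, unless an ongoing reset
      * has already disabled the command queue, remove the MAC address from
      * hardware and reset the queues while stopping them.
      */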
1525 static int
1526 hns3vf_do_stop(struct hns3_adapter *hns)
1527 {
1528         struct hns3_hw *hw = &hns->hw;
1529         bool reset_queue;
1530
1531         hw->mac.link_status = ETH_LINK_DOWN;
1532
1533         if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
1534                 hns3vf_configure_mac_addr(hns, true);
1535                 reset_queue = true;
1536         } else {
1537                 reset_queue = false;
             }
1538         return hns3_stop_queues(hns, reset_queue);
1539 }
1540
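     /*
      * Unbind every Rx queue from its interrupt vector and release the
      * event fds and the queue/vector mapping array.
      */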
1541 static void
1542 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
1543 {
1544         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1546         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1547         uint8_t base = 0;
1548         uint8_t vec = 0;
1549         uint16_t q_id;
1550
1551         if (dev->data->dev_conf.intr_conf.rxq == 0)
1552                 return;
1553
1554         /* unmap the ring with vector */
1555         if (rte_intr_allow_others(intr_handle)) {
1556                 vec = RTE_INTR_VEC_RXTX_OFFSET;
1557                 base = RTE_INTR_VEC_RXTX_OFFSET;
1558         }
1559         if (rte_intr_dp_is_en(intr_handle)) {
1560                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1561                         (void)hns3vf_bind_ring_with_vector(hw, vec, false,
1562                                                            HNS3_RING_TYPE_RX,
1563                                                            q_id);
1564                         if (vec < base + intr_handle->nb_efd - 1)
1565                                 vec++;
1566                 }
1567         }
1568         /* Clean datapath event and queue/vec mapping */
1569         rte_intr_efd_disable(intr_handle);
1570         if (intr_handle->intr_vec) {
1571                 rte_free(intr_handle->intr_vec);
1572                 intr_handle->intr_vec = NULL;
1573         }
1574 }
1575
1576 static void
1577 hns3vf_dev_stop(struct rte_eth_dev *dev)
1578 {
1579         struct hns3_adapter *hns = dev->data->dev_private;
1580         struct hns3_hw *hw = &hns->hw;
1581
1582         PMD_INIT_FUNC_TRACE();
1583
1584         hw->adapter_state = HNS3_NIC_STOPPING;
1585         hns3_set_rxtx_function(dev);
1586         rte_wmb();
1587         /* Disable datapath on secondary process. */
1588         hns3_mp_req_stop_rxtx(dev);
1589         /* Prevent crashes when queues are still in use. */
1590         rte_delay_ms(hw->tqps_num);
1591
1592         rte_spinlock_lock(&hw->lock);
1593         if (rte_atomic16_read(&hw->reset.resetting) == 0) {
1594                 hns3vf_do_stop(hns);
1595                 hns3_dev_release_mbufs(hns);
1596                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1597         }
1598         rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1599         rte_spinlock_unlock(&hw->lock);
1600
1601         hns3vf_unmap_rx_interrupt(dev);
1602 }
1603
1604 static void
1605 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1606 {
1607         struct hns3_adapter *hns = eth_dev->data->dev_private;
1608         struct hns3_hw *hw = &hns->hw;
1609
1610         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1611                 return;
1612
1613         if (hw->adapter_state == HNS3_NIC_STARTED)
1614                 hns3vf_dev_stop(eth_dev);
1615
1616         hw->adapter_state = HNS3_NIC_CLOSING;
1617         hns3_reset_abort(hns);
1618         hw->adapter_state = HNS3_NIC_CLOSED;
1619         rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1620         hns3vf_configure_all_mc_mac_addr(hns, true);
1621         hns3vf_remove_all_vlan_table(hns);
1622         hns3vf_uninit_vf(eth_dev);
1623         hns3_free_all_queues(eth_dev);
1624         rte_free(hw->reset.wait_data);
1625         rte_free(eth_dev->process_private);
1626         eth_dev->process_private = NULL;
1627         hns3_mp_uninit_primary();
1628         hns3_warn(hw, "Close port %d finished", hw->data->port_id);
1629 }
1630
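     /*
      * Report the link status from the parameters cached in hw->mac; any
      * unrecognized speed is reported as 100M.
      */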
1631 static int
1632 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1633                        __rte_unused int wait_to_complete)
1634 {
1635         struct hns3_adapter *hns = eth_dev->data->dev_private;
1636         struct hns3_hw *hw = &hns->hw;
1637         struct hns3_mac *mac = &hw->mac;
1638         struct rte_eth_link new_link;
1639
1640         memset(&new_link, 0, sizeof(new_link));
1641         switch (mac->link_speed) {
1642         case ETH_SPEED_NUM_10M:
1643         case ETH_SPEED_NUM_100M:
1644         case ETH_SPEED_NUM_1G:
1645         case ETH_SPEED_NUM_10G:
1646         case ETH_SPEED_NUM_25G:
1647         case ETH_SPEED_NUM_40G:
1648         case ETH_SPEED_NUM_50G:
1649         case ETH_SPEED_NUM_100G:
1650                 new_link.link_speed = mac->link_speed;
1651                 break;
1652         default:
1653                 new_link.link_speed = ETH_SPEED_NUM_100M;
1654                 break;
1655         }
1656
1657         new_link.link_duplex = mac->link_duplex;
1658         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
1659         new_link.link_autoneg =
1660             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
1661
1662         return rte_eth_linkstatus_set(eth_dev, &new_link);
1663 }
1664
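     /* Apply the TC configuration and start (optionally resetting) the queues. */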
1665 static int
1666 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1667 {
1668         struct hns3_hw *hw = &hns->hw;
1669         int ret;
1670
1671         ret = hns3vf_set_tc_info(hns);
1672         if (ret)
1673                 return ret;
1674
1675         ret = hns3_start_queues(hns, reset_queue);
1676         if (ret) {
1677                 hns3_err(hw, "Failed to start queues: %d", ret);
1678                 return ret;
1679         }
1680
1681         return 0;
1682 }
1683
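     /*
      * With Rx queue interrupts enabled, allocate one event fd per Rx
      * queue and bind each queue to an MSI-X vector; this is the inverse
      * of hns3vf_unmap_rx_interrupt().
      */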
1684 static int
1685 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
1686 {
1687         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1688         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1689         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1690         uint32_t intr_vector;
1691         uint8_t base = 0;
1692         uint8_t vec = 0;
1693         uint16_t q_id;
1694         int ret;
1695
1696         if (dev->data->dev_conf.intr_conf.rxq == 0)
1697                 return 0;
1698
1699         /* disable uio/vfio intr/eventfd mapping */
1700         rte_intr_disable(intr_handle);
1701
1702         /* check and configure queue intr-vector mapping */
1703         if (rte_intr_cap_multiple(intr_handle) ||
1704             !RTE_ETH_DEV_SRIOV(dev).active) {
1705                 intr_vector = hw->used_rx_queues;
1706                 /* It creates event fd for each intr vector when MSIX is used */
1707                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1708                         return -EINVAL;
1709         }
1710         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1711                 intr_handle->intr_vec =
1712                         rte_zmalloc("intr_vec",
1713                                     hw->used_rx_queues * sizeof(int), 0);
1714                 if (intr_handle->intr_vec == NULL) {
1715                         hns3_err(hw, "Failed to allocate %d rx_queues"
1716                                      " intr_vec", hw->used_rx_queues);
1717                         ret = -ENOMEM;
1718                         goto vf_alloc_intr_vec_error;
1719                 }
1720         }
1721
1722         if (rte_intr_allow_others(intr_handle)) {
1723                 vec = RTE_INTR_VEC_RXTX_OFFSET;
1724                 base = RTE_INTR_VEC_RXTX_OFFSET;
1725         }
1726         if (rte_intr_dp_is_en(intr_handle)) {
1727                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1728                         ret = hns3vf_bind_ring_with_vector(hw, vec, true,
1729                                                            HNS3_RING_TYPE_RX,
1730                                                            q_id);
1731                         if (ret)
1732                                 goto vf_bind_vector_error;
1733                         intr_handle->intr_vec[q_id] = vec;
1734                         if (vec < base + intr_handle->nb_efd - 1)
1735                                 vec++;
1736                 }
1737         }
1738         rte_intr_enable(intr_handle);
1739         return 0;
1740
1741 vf_bind_vector_error:
1742         rte_intr_efd_disable(intr_handle);
1743         if (intr_handle->intr_vec) {
1744                 rte_free(intr_handle->intr_vec);
1745                 intr_handle->intr_vec = NULL;
1746         }
1747         return ret;
1748 vf_alloc_intr_vec_error:
1749         rte_intr_efd_disable(intr_handle);
1750         return ret;
1751 }
1752
1753 static int
1754 hns3vf_dev_start(struct rte_eth_dev *dev)
1755 {
1756         struct hns3_adapter *hns = dev->data->dev_private;
1757         struct hns3_hw *hw = &hns->hw;
1758         int ret;
1759
1760         PMD_INIT_FUNC_TRACE();
1761         if (rte_atomic16_read(&hw->reset.resetting))
1762                 return -EBUSY;
1763
1764         rte_spinlock_lock(&hw->lock);
1765         hw->adapter_state = HNS3_NIC_STARTING;
1766         ret = hns3vf_do_start(hns, true);
1767         if (ret) {
1768                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1769                 rte_spinlock_unlock(&hw->lock);
1770                 return ret;
1771         }
1772         hw->adapter_state = HNS3_NIC_STARTED;
1773         rte_spinlock_unlock(&hw->lock);
1774
1775         ret = hns3vf_map_rx_interrupt(dev);
1776         if (ret)
1777                 return ret;
1778         hns3_set_rxtx_function(dev);
1779         hns3_mp_req_start_rxtx(dev);
1780         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
1781
1782         return ret;
1783 }
1784
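     /*
      * Poll the reset status register that matches the current reset
      * level; the reset is done once the corresponding "reset ongoing"
      * bits are cleared.
      */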
1785 static bool
1786 is_vf_reset_done(struct hns3_hw *hw)
1787 {
1788 #define HNS3_FUN_RST_ING_BITS \
1789         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1790          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1791          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1792          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1793
1794         uint32_t val;
1795
1796         if (hw->reset.level == HNS3_VF_RESET) {
1797                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1798                 if (val & HNS3_VF_RST_ING_BIT)
1799                         return false;
1800         } else {
1801                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1802                 if (val & HNS3_FUN_RST_ING_BITS)
1803                         return false;
1804         }
1805         return true;
1806 }
1807
1808 bool
1809 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1810 {
1811         struct hns3_hw *hw = &hns->hw;
1812         enum hns3_reset_level reset;
1813
1814         hns3vf_check_event_cause(hns, NULL);
1815         reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1816         if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
1817                 hns3_warn(hw, "High level reset %d is pending", reset);
1818                 return true;
1819         }
1820         return false;
1821 }
1822
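     /*
      * Non-blocking wait for hardware readiness: arm an alarm that polls
      * is_vf_reset_done() and keep returning -EAGAIN until the wait either
      * succeeds or times out.
      */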
1823 static int
1824 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1825 {
1826         struct hns3_hw *hw = &hns->hw;
1827         struct hns3_wait_data *wait_data = hw->reset.wait_data;
1828         struct timeval tv;
1829
1830         if (wait_data->result == HNS3_WAIT_SUCCESS) {
1831                 /*
1832                  * Even after the VF reset is done, the PF may not have
1833                  * completed its own reset; mailbox messages sent to the PF
1834                  * can fail during that window, so add an extra delay.
1835                  */
1836                 if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1837                     hw->reset.level == HNS3_FLR_RESET)
1838                         return 0;
1839                 /* Reset retry process, no need to add extra delay. */
1840                 if (hw->reset.attempts)
1841                         return 0;
1842                 if (wait_data->check_completion == NULL)
1843                         return 0;
1844
1845                 wait_data->check_completion = NULL;
1846                 wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
1847                 wait_data->count = 1;
1848                 wait_data->result = HNS3_WAIT_REQUEST;
1849                 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1850                                   wait_data);
1851                 hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset to complete");
1852                 return -EAGAIN;
1853         } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1854                 gettimeofday(&tv, NULL);
1855                 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1856                           tv.tv_sec, tv.tv_usec);
1857                 return -ETIME;
1858         } else if (wait_data->result == HNS3_WAIT_REQUEST) {
1859                 return -EAGAIN;
             }
1860
1861         wait_data->hns = hns;
1862         wait_data->check_completion = is_vf_reset_done;
1863         wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1864                                       HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
1865         wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1866         wait_data->count = HNS3VF_RESET_WAIT_CNT;
1867         wait_data->result = HNS3_WAIT_REQUEST;
1868         rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1869         return -EAGAIN;
1870 }
1871
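     /*
      * For a VF function reset, notify the PF via mailbox first; in all
      * cases disable further use of the command queue during the reset.
      */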
1872 static int
1873 hns3vf_prepare_reset(struct hns3_adapter *hns)
1874 {
1875         struct hns3_hw *hw = &hns->hw;
1876         int ret = 0;
1877
1878         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1879                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1880                                         0, true, NULL, 0);
1881         }
1882         rte_atomic16_set(&hw->reset.disable_cmd, 1);
1883
1884         return ret;
1885 }
1886
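     /*
      * Quiesce the port before a reset: stop the service alarm, re-select
      * the Rx/Tx burst functions (also on secondary processes), stop the
      * queues and, while the command queue is still usable, remove the
      * multicast MAC addresses that hardware cannot clean up by itself.
      */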
1887 static int
1888 hns3vf_stop_service(struct hns3_adapter *hns)
1889 {
1890         struct hns3_hw *hw = &hns->hw;
1891         struct rte_eth_dev *eth_dev;
1892
1893         eth_dev = &rte_eth_devices[hw->data->port_id];
1894         if (hw->adapter_state == HNS3_NIC_STARTED)
1895                 rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
1896         hw->mac.link_status = ETH_LINK_DOWN;
1897
1898         hns3_set_rxtx_function(eth_dev);
1899         rte_wmb();
1900         /* Disable datapath on secondary process. */
1901         hns3_mp_req_stop_rxtx(eth_dev);
1902         rte_delay_ms(hw->tqps_num);
1903
1904         rte_spinlock_lock(&hw->lock);
1905         if (hw->adapter_state == HNS3_NIC_STARTED ||
1906             hw->adapter_state == HNS3_NIC_STOPPING) {
1907                 hns3vf_do_stop(hns);
1908                 hw->reset.mbuf_deferred_free = true;
1909         } else {
1910                 hw->reset.mbuf_deferred_free = false;
             }
1911
1912         /*
1913          * It is cumbersome for hardware to pick-and-choose entries for
1914          * deletion from table space. Hence, for a function reset, software
1915          * intervention is required to delete the entries.
1916          */
1917         if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
1918                 hns3vf_configure_all_mc_mac_addr(hns, true);
1919         rte_spinlock_unlock(&hw->lock);
1920
1921         return 0;
1922 }
1923
1924 static int
1925 hns3vf_start_service(struct hns3_adapter *hns)
1926 {
1927         struct hns3_hw *hw = &hns->hw;
1928         struct rte_eth_dev *eth_dev;
1929
1930         eth_dev = &rte_eth_devices[hw->data->port_id];
1931         hns3_set_rxtx_function(eth_dev);
1932         hns3_mp_req_start_rxtx(eth_dev);
1933         if (hw->adapter_state == HNS3_NIC_STARTED)
1934                 hns3vf_service_handler(eth_dev);
1935
1936         return 0;
1937 }
1938
1939 static int
1940 hns3vf_check_default_mac_change(struct hns3_hw *hw)
1941 {
1942         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1943         struct rte_ether_addr *hw_mac;
1944         int ret;
1945
1946         /*
1947          * The hns3 PF kernel ethdev driver supports setting the VF MAC
1948          * address on the host with the "ip link set ..." command. If the
1949          * PF kernel driver sets the MAC address after the related VF
1950          * device has been initialized, it notifies the VF driver to reset
1951          * the VF device so that the new MAC address takes effect
1952          * immediately. The hns3 VF PMD driver therefore checks whether
1953          * the MAC address has been changed by the PF kernel driver and,
1954          * if so, configures the hardware with the new MAC address in the
1955          * configuration-recovery stage of the reset process.
1956          */
1957         ret = hns3vf_get_host_mac_addr(hw);
1958         if (ret)
1959                 return ret;
1960
1961         hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
1962         ret = rte_is_zero_ether_addr(hw_mac);
1963         if (ret) {
1964                 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
1965         } else {
1966                 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
1967                 if (!ret) {
1968                         rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
1969                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1970                                               &hw->data->mac_addrs[0]);
1971                         hns3_warn(hw, "Default MAC address has been changed to:"
1972                                   " %s by the host PF kernel ethdev driver",
1973                                   mac_str);
1974                 }
1975         }
1976
1977         return 0;
1978 }
1979
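     /*
      * Recover the hardware configuration after a reset: default and
      * multicast MAC addresses, promiscuous mode, VLAN settings and, if
      * the port was running, the queues; roll back on failure.
      */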
1980 static int
1981 hns3vf_restore_conf(struct hns3_adapter *hns)
1982 {
1983         struct hns3_hw *hw = &hns->hw;
1984         int ret;
1985
1986         ret = hns3vf_check_default_mac_change(hw);
1987         if (ret)
1988                 return ret;
1989
1990         ret = hns3vf_configure_mac_addr(hns, false);
1991         if (ret)
1992                 return ret;
1993
1994         ret = hns3vf_configure_all_mc_mac_addr(hns, false);
1995         if (ret)
1996                 goto err_mc_mac;
1997
1998         ret = hns3vf_restore_promisc(hns);
1999         if (ret)
2000                 goto err_vlan_table;
2001
2002         ret = hns3vf_restore_vlan_conf(hns);
2003         if (ret)
2004                 goto err_vlan_table;
2005
2006         if (hw->adapter_state == HNS3_NIC_STARTED) {
2007                 ret = hns3vf_do_start(hns, false);
2008                 if (ret)
2009                         goto err_vlan_table;
2010                 hns3_info(hw, "hns3vf dev restart successful!");
2011         } else if (hw->adapter_state == HNS3_NIC_STOPPING) {
2012                 hw->adapter_state = HNS3_NIC_CONFIGURED;
             }
2013         return 0;
2014
2015 err_vlan_table:
2016         hns3vf_configure_all_mc_mac_addr(hns, true);
2017 err_mc_mac:
2018         hns3vf_configure_mac_addr(hns, true);
2019         return ret;
2020 }
2021
2022 static enum hns3_reset_level
2023 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2024 {
2025         enum hns3_reset_level reset_level;
2026
2027         /* return the highest priority reset level amongst all */
2028         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2029                 reset_level = HNS3_VF_RESET;
2030         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2031                 reset_level = HNS3_VF_FULL_RESET;
2032         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2033                 reset_level = HNS3_VF_PF_FUNC_RESET;
2034         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2035                 reset_level = HNS3_VF_FUNC_RESET;
2036         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2037                 reset_level = HNS3_FLR_RESET;
2038         else
2039                 reset_level = HNS3_NONE_RESET;
2040
2041         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2042                 return HNS3_NONE_RESET;
2043
2044         return reset_level;
2045 }
2046
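     /*
      * Delayed task that drives reset handling: first recover a possibly
      * lost vector-0 interrupt, then process the highest pending reset
      * level, warning when the reset takes unusually long.
      */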
2047 static void
2048 hns3vf_reset_service(void *param)
2049 {
2050         struct hns3_adapter *hns = (struct hns3_adapter *)param;
2051         struct hns3_hw *hw = &hns->hw;
2052         enum hns3_reset_level reset_level;
2053         struct timeval tv_delta;
2054         struct timeval tv_start;
2055         struct timeval tv;
2056         uint64_t msec;
2057
2058         /*
2059          * If the interrupt was not triggered within the delay time, it
2060          * may have been lost. Handle it in the delayed task to recover
2061          * from the error.
2062          */
2063         if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
2064                 rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
2065                 hns3_err(hw, "Handling interrupts in delayed tasks");
2066                 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2067                 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2068                 if (reset_level == HNS3_NONE_RESET) {
2069                         hns3_err(hw, "No reset level is set, try global reset");
2070                         hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2071                 }
2072         }
2073         rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
2074
2075         /*
2076          * Hardware reset has been notified, we now have to poll & check if
2077          * hardware has actually completed the reset sequence.
2078          */
2079         reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2080         if (reset_level != HNS3_NONE_RESET) {
2081                 gettimeofday(&tv_start, NULL);
2082                 hns3_reset_process(hns, reset_level);
2083                 gettimeofday(&tv, NULL);
2084                 timersub(&tv, &tv_start, &tv_delta);
2085                 msec = tv_delta.tv_sec * MSEC_PER_SEC +
2086                        tv_delta.tv_usec / USEC_PER_MSEC;
2087                 if (msec > HNS3_RESET_PROCESS_MS)
2088                         hns3_err(hw, "%d handle long time delta %" PRIu64
2089                                  " ms time=%ld.%.6ld",
2090                                  hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2091         }
2092 }
2093
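     /*
      * Re-initialize the device after a reset: restore bus mastering and
      * MSI-X for a full VF reset, re-init the command queue, then reset
      * all queues and redo the basic hardware initialization.
      */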
2094 static int
2095 hns3vf_reinit_dev(struct hns3_adapter *hns)
2096 {
2097         struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2098         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2099         struct hns3_hw *hw = &hns->hw;
2100         int ret;
2101
2102         if (hw->reset.level == HNS3_VF_FULL_RESET) {
2103                 rte_intr_disable(&pci_dev->intr_handle);
2104                 hns3vf_set_bus_master(pci_dev, true);
2105         }
2106
2107         /* Firmware command initialize */
2108         ret = hns3_cmd_init(hw);
2109         if (ret) {
2110                 hns3_err(hw, "Failed to init cmd: %d", ret);
2111                 goto err_cmd_init;
2112         }
2113
2114         if (hw->reset.level == HNS3_VF_FULL_RESET) {
2115                 /*
2116                  * UIO enables MSI-X by writing the PCIe configuration space,
2117                  * while vfio-pci enables MSI-X in rte_intr_enable.
2118                  */
2119                 if (pci_dev->kdrv == RTE_KDRV_IGB_UIO ||
2120                     pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) {
2121                         if (hns3vf_enable_msix(pci_dev, true))
2122                                 hns3_err(hw, "Failed to enable msix");
2123                 }
2124
2125                 rte_intr_enable(&pci_dev->intr_handle);
2126         }
2127
2128         ret = hns3_reset_all_queues(hns);
2129         if (ret) {
2130                 hns3_err(hw, "Failed to reset all queues: %d", ret);
2131                 goto err_init;
2132         }
2133
2134         ret = hns3vf_init_hardware(hns);
2135         if (ret) {
2136                 hns3_err(hw, "Failed to init hardware: %d", ret);
2137                 goto err_init;
2138         }
2139
2140         return 0;
2141
2142 err_cmd_init:
2143         hns3vf_set_bus_master(pci_dev, false);
2144 err_init:
2145         hns3_cmd_uninit(hw);
2146         return ret;
2147 }
2148
2149 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2150         .dev_start          = hns3vf_dev_start,
2151         .dev_stop           = hns3vf_dev_stop,
2152         .dev_close          = hns3vf_dev_close,
2153         .mtu_set            = hns3vf_dev_mtu_set,
2154         .promiscuous_enable = hns3vf_dev_promiscuous_enable,
2155         .promiscuous_disable = hns3vf_dev_promiscuous_disable,
2156         .allmulticast_enable = hns3vf_dev_allmulticast_enable,
2157         .allmulticast_disable = hns3vf_dev_allmulticast_disable,
2158         .stats_get          = hns3_stats_get,
2159         .stats_reset        = hns3_stats_reset,
2160         .xstats_get         = hns3_dev_xstats_get,
2161         .xstats_get_names   = hns3_dev_xstats_get_names,
2162         .xstats_reset       = hns3_dev_xstats_reset,
2163         .xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2164         .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2165         .dev_infos_get      = hns3vf_dev_infos_get,
2166         .rx_queue_setup     = hns3_rx_queue_setup,
2167         .tx_queue_setup     = hns3_tx_queue_setup,
2168         .rx_queue_release   = hns3_dev_rx_queue_release,
2169         .tx_queue_release   = hns3_dev_tx_queue_release,
2170         .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2171         .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2172         .dev_configure      = hns3vf_dev_configure,
2173         .mac_addr_add       = hns3vf_add_mac_addr,
2174         .mac_addr_remove    = hns3vf_remove_mac_addr,
2175         .mac_addr_set       = hns3vf_set_default_mac_addr,
2176         .set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
2177         .link_update        = hns3vf_dev_link_update,
2178         .rss_hash_update    = hns3_dev_rss_hash_update,
2179         .rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2180         .reta_update        = hns3_dev_rss_reta_update,
2181         .reta_query         = hns3_dev_rss_reta_query,
2182         .filter_ctrl        = hns3_dev_filter_ctrl,
2183         .vlan_filter_set    = hns3vf_vlan_filter_set,
2184         .vlan_offload_set   = hns3vf_vlan_offload_set,
2185         .get_reg            = hns3_get_regs,
2186         .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2187 };
2188
2189 static const struct hns3_reset_ops hns3vf_reset_ops = {
2190         .reset_service       = hns3vf_reset_service,
2191         .stop_service        = hns3vf_stop_service,
2192         .prepare_reset       = hns3vf_prepare_reset,
2193         .wait_hardware_ready = hns3vf_wait_hardware_ready,
2194         .reinit_dev          = hns3vf_reinit_dev,
2195         .restore_conf        = hns3vf_restore_conf,
2196         .start_service       = hns3vf_start_service,
2197 };
2198
2199 static int
2200 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2201 {
2202         struct hns3_adapter *hns = eth_dev->data->dev_private;
2203         struct hns3_hw *hw = &hns->hw;
2204         int ret;
2205
2206         PMD_INIT_FUNC_TRACE();
2207
2208         eth_dev->process_private = (struct hns3_process_private *)
2209             rte_zmalloc_socket("hns3_filter_list",
2210                                sizeof(struct hns3_process_private),
2211                                RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
2212         if (eth_dev->process_private == NULL) {
2213                 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
2214                 return -ENOMEM;
2215         }
2216
2217         /* initialize flow filter lists */
2218         hns3_filterlist_init(eth_dev);
2219
2220         hns3_set_rxtx_function(eth_dev);
2221         eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2222         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2223                 hns3_mp_init_secondary();
2224                 hw->secondary_cnt++;
2225                 return 0;
2226         }
2227
2228         hns3_mp_init_primary();
2229
2230         hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2231         hns->is_vf = true;
2232         hw->data = eth_dev->data;
2233
2234         ret = hns3_reset_init(hw);
2235         if (ret)
2236                 goto err_init_reset;
2237         hw->reset.ops = &hns3vf_reset_ops;
2238
2239         ret = hns3vf_init_vf(eth_dev);
2240         if (ret) {
2241                 PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2242                 goto err_init_vf;
2243         }
2244
2245         /* Allocate memory for storing MAC addresses */
2246         eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
2247                                                sizeof(struct rte_ether_addr) *
2248                                                HNS3_VF_UC_MACADDR_NUM, 0);
2249         if (eth_dev->data->mac_addrs == NULL) {
2250                 PMD_INIT_LOG(ERR, "Failed to allocate %zu bytes needed "
2251                              "to store MAC addresses",
2252                              sizeof(struct rte_ether_addr) *
2253                              HNS3_VF_UC_MACADDR_NUM);
2254                 ret = -ENOMEM;
2255                 goto err_rte_zmalloc;
2256         }
2257
2258         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
2259                             &eth_dev->data->mac_addrs[0]);
2260         hw->adapter_state = HNS3_NIC_INITIALIZED;
2261         /*
2262          * Tell rte_eth_dev_close() that it should also release the private
2263          * port resources.
2264          */
2265         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2266
2267         if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
2268                 hns3_err(hw, "Reschedule reset service after dev_init");
2269                 hns3_schedule_reset(hns);
2270         } else {
2271                 /* IMP will wait for the ready flag before resetting */
2272                 hns3_notify_reset_ready(hw, false);
2273         }
2274         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2275                           eth_dev);
2276         return 0;
2277
2278 err_rte_zmalloc:
2279         hns3vf_uninit_vf(eth_dev);
2280
2281 err_init_vf:
2282         rte_free(hw->reset.wait_data);
2283
2284 err_init_reset:
2285         eth_dev->dev_ops = NULL;
2286         eth_dev->rx_pkt_burst = NULL;
2287         eth_dev->tx_pkt_burst = NULL;
2288         eth_dev->tx_pkt_prepare = NULL;
2289         rte_free(eth_dev->process_private);
2290         eth_dev->process_private = NULL;
2291
2292         return ret;
2293 }
2294
2295 static int
2296 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2297 {
2298         struct hns3_adapter *hns = eth_dev->data->dev_private;
2299         struct hns3_hw *hw = &hns->hw;
2300
2301         PMD_INIT_FUNC_TRACE();
2302
2303         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2304                 return -EPERM;
2305
2306         eth_dev->dev_ops = NULL;
2307         eth_dev->rx_pkt_burst = NULL;
2308         eth_dev->tx_pkt_burst = NULL;
2309         eth_dev->tx_pkt_prepare = NULL;
2310
2311         if (hw->adapter_state < HNS3_NIC_CLOSING)
2312                 hns3vf_dev_close(eth_dev);
2313
2314         hw->adapter_state = HNS3_NIC_REMOVED;
2315         return 0;
2316 }
2317
2318 static int
2319 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2320                      struct rte_pci_device *pci_dev)
2321 {
2322         return rte_eth_dev_pci_generic_probe(pci_dev,
2323                                              sizeof(struct hns3_adapter),
2324                                              hns3vf_dev_init);
2325 }
2326
2327 static int
2328 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2329 {
2330         return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2331 }
2332
2333 static const struct rte_pci_id pci_id_hns3vf_map[] = {
2334         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2335         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2336         { .vendor_id = 0, /* sentinel */ },
2337 };
2338
2339 static struct rte_pci_driver rte_hns3vf_pmd = {
2340         .id_table = pci_id_hns3vf_map,
2341         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2342         .probe = eth_hns3vf_pci_probe,
2343         .remove = eth_hns3vf_pci_remove,
2344 };
2345
2346 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2347 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2348 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");