net/hns3: log time delta in decimal format
dpdk.git: drivers/net/hns3/hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <linux/pci_regs.h>
6 #include <rte_alarm.h>
7 #include <ethdev_pci.h>
8 #include <rte_io.h>
9 #include <rte_pci.h>
10 #include <rte_vfio.h>
11
12 #include "hns3_ethdev.h"
13 #include "hns3_logs.h"
14 #include "hns3_rxtx.h"
15 #include "hns3_regs.h"
16 #include "hns3_intr.h"
17 #include "hns3_dcb.h"
18 #include "hns3_mp.h"
19
20 #define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
21 #define HNS3VF_SERVICE_INTERVAL         1000000 /* us */
22
23 #define HNS3VF_RESET_WAIT_MS    20
24 #define HNS3VF_RESET_WAIT_CNT   2000
25
26 /* Reset related Registers */
27 #define HNS3_GLOBAL_RESET_BIT           0
28 #define HNS3_CORE_RESET_BIT             1
29 #define HNS3_IMP_RESET_BIT              2
30 #define HNS3_FUN_RST_ING_B              0
31
32 enum hns3vf_evt_cause {
33         HNS3VF_VECTOR0_EVENT_RST,
34         HNS3VF_VECTOR0_EVENT_MBX,
35         HNS3VF_VECTOR0_EVENT_OTHER,
36 };
37
38 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
39                                                     uint64_t *levels);
40 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
41 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
42
43 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
44                                   struct rte_ether_addr *mac_addr);
45 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
46                                      struct rte_ether_addr *mac_addr);
47 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
48                                    __rte_unused int wait_to_complete);
49
50 /* set PCI bus mastering */
51 static int
52 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
53 {
54         uint16_t reg;
55         int ret;
56
57         ret = rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
58         if (ret < 0) {
59                 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
60                              PCI_COMMAND);
61                 return ret;
62         }
63
64         if (op)
65                 /* set the master bit */
66                 reg |= PCI_COMMAND_MASTER;
67         else
68                 reg &= ~(PCI_COMMAND_MASTER);
69
70         return rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
71 }
72
73 /**
74  * hns3vf_find_pci_capability - look up a capability in the PCI capability list
75  * @cap: the capability ID to search for
76  *
77  * Return the offset of the given capability in the list, or 0 if not found.
78  */
79 static int
80 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
81 {
82 #define MAX_PCIE_CAPABILITY 48
83         uint16_t status;
84         uint8_t pos;
85         uint8_t id;
86         int ttl;
87         int ret;
88
89         ret = rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
90         if (ret < 0) {
91                 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_STATUS);
92                 return 0;
93         }
94
95         if (!(status & PCI_STATUS_CAP_LIST))
96                 return 0;
97
98         ttl = MAX_PCIE_CAPABILITY;
99         ret = rte_pci_read_config(device, &pos, sizeof(pos),
100                                   PCI_CAPABILITY_LIST);
101         if (ret < 0) {
102                 PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
103                              PCI_CAPABILITY_LIST);
104                 return 0;
105         }
106
107         while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
108                 ret = rte_pci_read_config(device, &id, sizeof(id),
109                                           (pos + PCI_CAP_LIST_ID));
110                 if (ret < 0) {
111                         PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
112                                      (pos + PCI_CAP_LIST_ID));
113                         break;
114                 }
115
116                 if (id == 0xFF)
117                         break;
118
119                 if (id == cap)
120                         return (int)pos;
121
122                 ret = rte_pci_read_config(device, &pos, sizeof(pos),
123                                           (pos + PCI_CAP_LIST_NEXT));
124                 if (ret < 0) {
125                         PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
126                                      (pos + PCI_CAP_LIST_NEXT));
127                         break;
128                 }
129         }
130         return 0;
131 }
132
133 static int
134 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
135 {
136         uint16_t control;
137         int pos;
138         int ret;
139
140         pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
141         if (pos) {
142                 ret = rte_pci_read_config(device, &control, sizeof(control),
143                                     (pos + PCI_MSIX_FLAGS));
144                 if (ret < 0) {
145                         PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x",
146                                      (pos + PCI_MSIX_FLAGS));
147                         return -ENXIO;
148                 }
149
150                 if (op)
151                         control |= PCI_MSIX_FLAGS_ENABLE;
152                 else
153                         control &= ~PCI_MSIX_FLAGS_ENABLE;
154                 ret = rte_pci_write_config(device, &control, sizeof(control),
155                                           (pos + PCI_MSIX_FLAGS));
156                 if (ret < 0) {
157                         PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x",
158                                     (pos + PCI_MSIX_FLAGS));
159                 }
160                 return 0;
161         }
162         return -ENXIO;
163 }
164
165 static int
166 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
167 {
168         /* mac address was checked by upper level interface */
169         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
170         int ret;
171
172         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
173                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
174                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
175         if (ret) {
176                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
177                                       mac_addr);
178                 hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
179                          mac_str, ret);
180         }
181         return ret;
182 }
183
184 static int
185 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
186 {
187         /* mac address was checked by upper level interface */
188         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
189         int ret;
190
191         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
192                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
193                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN,
194                                 false, NULL, 0);
195         if (ret) {
196                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
197                                       mac_addr);
198                 hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
199                          mac_str, ret);
200         }
201         return ret;
202 }
203
204 static int
205 hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
206 {
207         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
208         struct rte_ether_addr *addr;
209         int ret;
210         int i;
211
212         for (i = 0; i < hw->mc_addrs_num; i++) {
213                 addr = &hw->mc_addrs[i];
214                 /* Check if there are duplicate addresses */
215                 if (rte_is_same_ether_addr(addr, mac_addr)) {
216                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
217                                               addr);
218                         hns3_err(hw, "failed to add mc mac addr, the same addr"
219                                  "(%s) has already been added by the "
220                                  "set_mc_mac_addr_list API", mac_str);
221                         return -EINVAL;
222                 }
223         }
224
225         ret = hns3vf_add_mc_mac_addr(hw, mac_addr);
226         if (ret) {
227                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
228                                       mac_addr);
229                 hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
230                          mac_str, ret);
231         }
232         return ret;
233 }
234
235 static int
236 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
237                     __rte_unused uint32_t idx,
238                     __rte_unused uint32_t pool)
239 {
240         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
241         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
242         int ret;
243
244         rte_spinlock_lock(&hw->lock);
245
246         /*
247          * In the hns3 network engine, UC and MC mac addresses are added to
248          * the firmware with different commands, so we must determine whether
249          * the input address is UC or MC and issue the matching command.
250          * Note that it is recommended to set MC mac addresses with the
251          * rte_eth_dev_set_mc_addr_list API, because setting MC addresses
252          * through the rte_eth_dev_mac_addr_add API may reduce the number of
253          * UC mac address entries available.
254          */
255         if (rte_is_multicast_ether_addr(mac_addr))
256                 ret = hns3vf_add_mc_addr_common(hw, mac_addr);
257         else
258                 ret = hns3vf_add_uc_mac_addr(hw, mac_addr);
259
260         rte_spinlock_unlock(&hw->lock);
261         if (ret) {
262                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
263                                       mac_addr);
264                 hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
265                          ret);
266         }
267
268         return ret;
269 }
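/*
 * Illustrative usage sketch (not part of this file): applications reach the
 * callbacks above through the generic ethdev API. port_id and the address
 * values below are assumptions for the example only:
 *
 *     struct rte_ether_addr uc = { .addr_bytes = {0x02, 0, 0, 0, 0, 0x01} };
 *     ret = rte_eth_dev_mac_addr_add(port_id, &uc, 0);
 *     struct rte_ether_addr mc = { .addr_bytes = {0x01, 0x00, 0x5e, 0, 0, 0x01} };
 *     ret = rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
 */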
270
271 static void
272 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
273 {
274         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
275         /* index will be checked by upper level rte interface */
276         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
277         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
278         int ret;
279
280         rte_spinlock_lock(&hw->lock);
281
282         if (rte_is_multicast_ether_addr(mac_addr))
283                 ret = hns3vf_remove_mc_mac_addr(hw, mac_addr);
284         else
285                 ret = hns3vf_remove_uc_mac_addr(hw, mac_addr);
286
287         rte_spinlock_unlock(&hw->lock);
288         if (ret) {
289                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
290                                       mac_addr);
291                 hns3_err(hw, "failed to remove mac addr(%s), ret = %d",
292                          mac_str, ret);
293         }
294 }
295
296 static int
297 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
298                             struct rte_ether_addr *mac_addr)
299 {
300 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
301         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
302         struct rte_ether_addr *old_addr;
303         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
304         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
305         int ret;
306
307         /*
308          * The rte layer of the DPDK framework guarantees that the input
309          * mac_addr parameter is a valid address.
310          */
311         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
312         rte_spinlock_lock(&hw->lock);
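        /*
         * addr_bytes carries both addresses: the new MAC in bytes 0..5 and the
         * current MAC in bytes 6..11, so one mailbox message conveys both the
         * old and the new address.
         */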
313         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
314         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
315                RTE_ETHER_ADDR_LEN);
316
317         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
318                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
319                                 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
320         if (ret) {
321                 /*
322                  * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
323                  * driver. When the user has configured a MAC address for the VF
324                  * device with an "ip link set ..." command on the PF device, the
325                  * hns3 PF kernel ethdev driver does not allow the VF driver to
326                  * reconfigure a different default MAC address, and returns
327                  * -EPERM to the VF driver through the mailbox.
328                  */
329                 if (ret == -EPERM) {
330                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
331                                               old_addr);
332                         hns3_warn(hw, "Has permanent mac addr(%s) for vf",
333                                   mac_str);
334                 } else {
335                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
336                                               mac_addr);
337                         hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
338                                  mac_str, ret);
339                 }
340         }
341
342         rte_ether_addr_copy(mac_addr,
343                             (struct rte_ether_addr *)hw->mac.mac_addr);
344         rte_spinlock_unlock(&hw->lock);
345
346         return ret;
347 }
348
349 static int
350 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
351 {
352         struct hns3_hw *hw = &hns->hw;
353         struct rte_ether_addr *addr;
354         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
355         int err = 0;
356         int ret;
357         int i;
358
359         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
360                 addr = &hw->data->mac_addrs[i];
361                 if (rte_is_zero_ether_addr(addr))
362                         continue;
363                 if (rte_is_multicast_ether_addr(addr))
364                         ret = del ? hns3vf_remove_mc_mac_addr(hw, addr) :
365                               hns3vf_add_mc_mac_addr(hw, addr);
366                 else
367                         ret = del ? hns3vf_remove_uc_mac_addr(hw, addr) :
368                               hns3vf_add_uc_mac_addr(hw, addr);
369
370                 if (ret) {
371                         err = ret;
372                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
373                                               addr);
374                         hns3_err(hw, "failed to %s mac addr(%s) index:%d "
375                                  "ret = %d.", del ? "remove" : "restore",
376                                  mac_str, i, ret);
377                 }
378         }
379         return err;
380 }
381
382 static int
383 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
384                        struct rte_ether_addr *mac_addr)
385 {
386         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
387         int ret;
388
389         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
390                                 HNS3_MBX_MAC_VLAN_MC_ADD,
391                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
392                                 NULL, 0);
393         if (ret) {
394                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
395                                       mac_addr);
396                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
397                          mac_str, ret);
398         }
399
400         return ret;
401 }
402
403 static int
404 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
405                           struct rte_ether_addr *mac_addr)
406 {
407         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
408         int ret;
409
410         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
411                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
412                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
413                                 NULL, 0);
414         if (ret) {
415                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
416                                       mac_addr);
417                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
418                          mac_str, ret);
419         }
420
421         return ret;
422 }
423
424 static int
425 hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
426                              struct rte_ether_addr *mc_addr_set,
427                              uint32_t nb_mc_addr)
428 {
429         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
430         struct rte_ether_addr *addr;
431         uint32_t i;
432         uint32_t j;
433
434         if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
435                 hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
436                          "invalid. valid range: 0~%d",
437                          nb_mc_addr, HNS3_MC_MACADDR_NUM);
438                 return -EINVAL;
439         }
440
441         /* Check if input mac addresses are valid */
442         for (i = 0; i < nb_mc_addr; i++) {
443                 addr = &mc_addr_set[i];
444                 if (!rte_is_multicast_ether_addr(addr)) {
445                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
446                                               addr);
447                         hns3_err(hw,
448                                  "failed to set mc mac addr, addr(%s) invalid.",
449                                  mac_str);
450                         return -EINVAL;
451                 }
452
453                 /* Check if there are duplicate addresses */
454                 for (j = i + 1; j < nb_mc_addr; j++) {
455                         if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
456                                 hns3_ether_format_addr(mac_str,
457                                                       RTE_ETHER_ADDR_FMT_SIZE,
458                                                       addr);
459                                 hns3_err(hw, "failed to set mc mac addr, "
460                                          "addrs invalid. duplicate addrs(%s).",
461                                          mac_str);
462                                 return -EINVAL;
463                         }
464                 }
465
466                 /*
467                  * Check if there are duplicate addresses between mac_addrs
468                  * and mc_addr_set
469                  */
470                 for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) {
471                         if (rte_is_same_ether_addr(addr,
472                                                    &hw->data->mac_addrs[j])) {
473                                 hns3_ether_format_addr(mac_str,
474                                                       RTE_ETHER_ADDR_FMT_SIZE,
475                                                       addr);
476                                 hns3_err(hw, "failed to set mc mac addr, "
477                                          "addrs invalid. addr(%s) was already "
478                                          "configured by the mac_addr add API",
479                                          mac_str);
480                                 return -EINVAL;
481                         }
482                 }
483         }
484
485         return 0;
486 }
487
488 static int
489 hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
490                             struct rte_ether_addr *mc_addr_set,
491                             uint32_t nb_mc_addr)
492 {
493         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
494         struct rte_ether_addr *addr;
495         int cur_addr_num;
496         int set_addr_num;
497         int num;
498         int ret;
499         int i;
500
501         ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
502         if (ret)
503                 return ret;
504
505         rte_spinlock_lock(&hw->lock);
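        /*
         * Note: the update below is not atomic. The current list is removed
         * first; if a later add fails, only the entries re-added so far stay
         * programmed and hw->mc_addrs_num reflects that partial state.
         */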
506         cur_addr_num = hw->mc_addrs_num;
507         for (i = 0; i < cur_addr_num; i++) {
508                 num = cur_addr_num - i - 1;
509                 addr = &hw->mc_addrs[num];
510                 ret = hns3vf_remove_mc_mac_addr(hw, addr);
511                 if (ret) {
512                         rte_spinlock_unlock(&hw->lock);
513                         return ret;
514                 }
515
516                 hw->mc_addrs_num--;
517         }
518
519         set_addr_num = (int)nb_mc_addr;
520         for (i = 0; i < set_addr_num; i++) {
521                 addr = &mc_addr_set[i];
522                 ret = hns3vf_add_mc_mac_addr(hw, addr);
523                 if (ret) {
524                         rte_spinlock_unlock(&hw->lock);
525                         return ret;
526                 }
527
528                 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
529                 hw->mc_addrs_num++;
530         }
531         rte_spinlock_unlock(&hw->lock);
532
533         return 0;
534 }
535
536 static int
537 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
538 {
539         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
540         struct hns3_hw *hw = &hns->hw;
541         struct rte_ether_addr *addr;
542         int err = 0;
543         int ret;
544         int i;
545
546         for (i = 0; i < hw->mc_addrs_num; i++) {
547                 addr = &hw->mc_addrs[i];
548                 if (!rte_is_multicast_ether_addr(addr))
549                         continue;
550                 if (del)
551                         ret = hns3vf_remove_mc_mac_addr(hw, addr);
552                 else
553                         ret = hns3vf_add_mc_mac_addr(hw, addr);
554                 if (ret) {
555                         err = ret;
556                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
557                                               addr);
558                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
559                                  del ? "Remove" : "Restore", mac_str, ret);
560                 }
561         }
562         return err;
563 }
564
565 static int
566 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
567                         bool en_uc_pmc, bool en_mc_pmc)
568 {
569         struct hns3_mbx_vf_to_pf_cmd *req;
570         struct hns3_cmd_desc desc;
571         int ret;
572
573         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
574
575         /*
576          * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver,
577          * which leads to the following behavior of promiscuous/allmulticast
578          * mode in the hns3 VF PMD driver:
579          * 1. The promiscuous/allmulticast mode can be configured successfully
580          *    only on a trusted VF device; configuring it on a non-trusted VF
581          *    device will fail. The hns3 VF device can be configured as a
582          *    trusted device by the hns3 PF kernel ethdev driver on the host
583          *    with the following command:
584          *      "ip link set <eth num> vf <vf id> trust on"
585          * 2. After promiscuous mode is configured successfully, the hns3 VF
586          *    PMD driver can receive both ingress and outgoing traffic, i.e.
587          *    all the ingress packets as well as the packets sent from the PF
588          *    and other VFs on the same physical port.
589          * 3. Note: because of a hardware constraint, the vlan filter is
590          *    enabled by default and cannot be turned off for a VF device, so
591          *    the vlan filter is still effective even in promiscuous mode. If
592          *    upper applications don't call the rte_eth_dev_vlan_filter API to
593          *    set a vlan on the VF device, the hns3 VF PMD driver cannot
594          *    receive vlan-tagged packets in promiscuous mode.
595          */
596         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
597         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
598         req->msg[1] = en_bc_pmc ? 1 : 0;
599         req->msg[2] = en_uc_pmc ? 1 : 0;
600         req->msg[3] = en_mc_pmc ? 1 : 0;
601         req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
602
603         ret = hns3_cmd_send(hw, &desc, 1);
604         if (ret)
605                 hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
606
607         return ret;
608 }
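/*
 * Illustrative usage sketch (not part of this file): once the VF is trusted on
 * the host ("ip link set <pf> vf <id> trust on", names are placeholders), an
 * application enables promiscuous mode through the generic ethdev API, which
 * ends up in hns3vf_dev_promiscuous_enable() below:
 *
 *     ret = rte_eth_promiscuous_enable(port_id);
 *     if (ret != 0)
 *         printf("enable failed: %d\n", ret); // non-trusted VF case
 */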
609
610 static int
611 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
612 {
613         struct hns3_adapter *hns = dev->data->dev_private;
614         struct hns3_hw *hw = &hns->hw;
615         int ret;
616
617         ret = hns3vf_set_promisc_mode(hw, true, true, true);
618         if (ret)
619                 hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
620                         ret);
621         return ret;
622 }
623
624 static int
625 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
626 {
627         bool allmulti = dev->data->all_multicast ? true : false;
628         struct hns3_adapter *hns = dev->data->dev_private;
629         struct hns3_hw *hw = &hns->hw;
630         int ret;
631
632         ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
633         if (ret)
634                 hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
635                         ret);
636         return ret;
637 }
638
639 static int
640 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
641 {
642         struct hns3_adapter *hns = dev->data->dev_private;
643         struct hns3_hw *hw = &hns->hw;
644         int ret;
645
646         if (dev->data->promiscuous)
647                 return 0;
648
649         ret = hns3vf_set_promisc_mode(hw, true, false, true);
650         if (ret)
651                 hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
652                         ret);
653         return ret;
654 }
655
656 static int
657 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
658 {
659         struct hns3_adapter *hns = dev->data->dev_private;
660         struct hns3_hw *hw = &hns->hw;
661         int ret;
662
663         if (dev->data->promiscuous)
664                 return 0;
665
666         ret = hns3vf_set_promisc_mode(hw, true, false, false);
667         if (ret)
668                 hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
669                         ret);
670         return ret;
671 }
672
673 static int
674 hns3vf_restore_promisc(struct hns3_adapter *hns)
675 {
676         struct hns3_hw *hw = &hns->hw;
677         bool allmulti = hw->data->all_multicast ? true : false;
678
679         if (hw->data->promiscuous)
680                 return hns3vf_set_promisc_mode(hw, true, true, true);
681
682         return hns3vf_set_promisc_mode(hw, true, false, allmulti);
683 }
684
685 static int
686 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
687                              bool mmap, enum hns3_ring_type queue_type,
688                              uint16_t queue_id)
689 {
690         struct hns3_vf_bind_vector_msg bind_msg;
691         const char *op_str;
692         uint16_t code;
693         int ret;
694
695         memset(&bind_msg, 0, sizeof(bind_msg));
696         code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
697                 HNS3_MBX_UNMAP_RING_TO_VECTOR;
698         bind_msg.vector_id = vector_id;
699
700         if (queue_type == HNS3_RING_TYPE_RX)
701                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
702         else
703                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
704
705         bind_msg.param[0].ring_type = queue_type;
706         bind_msg.ring_num = 1;
707         bind_msg.param[0].tqp_index = queue_id;
708         op_str = mmap ? "Map" : "Unmap";
709         ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
710                                 sizeof(bind_msg), false, NULL, 0);
711         if (ret)
712                 hns3_err(hw, "%s TQP %u failed, vector_id is %u, ret is %d.",
713                          op_str, queue_id, bind_msg.vector_id, ret);
714
715         return ret;
716 }
717
718 static int
719 hns3vf_init_ring_with_vector(struct hns3_hw *hw)
720 {
721         uint16_t vec;
722         int ret;
723         int i;
724
725         /*
726          * In the hns3 network engine, vector 0 is always the misc interrupt of
727          * this function, and vectors 1~N can be used for the queues of the
728          * function. Tx and Rx queues with the same number share an interrupt
729          * vector. During initialization, all hardware mappings between queues
730          * and interrupt vectors must be cleared, so that errors caused by
731          * residual configurations, such as unexpected Tx interrupts, can be
732          * avoided.
733          */
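        /*
         * Worked example (numbers are illustrative): with num_msi = 17 and
         * mapping mode HNS3_INTR_MAPPING_VEC_RSV_ONE, vec = 17 - 1 - 1 = 15,
         * so at most min(15, tqps_num) queue pairs have their residual
         * mappings cleared by the loop below.
         */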
734         vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
735         if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
736                 vec = vec - 1; /* the last interrupt is reserved */
737         hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
738         for (i = 0; i < hw->intr_tqps_num; i++) {
739                 /*
740                  * Set the gap limiter/rate limiter/quantity limiter algorithm
741                  * configuration for interrupt coalescing of the queue's interrupt.
742                  */
743                 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
744                                        HNS3_TQP_INTR_GL_DEFAULT);
745                 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
746                                        HNS3_TQP_INTR_GL_DEFAULT);
747                 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
748                 /*
749                  * QL (quantity limiter) is not used currently; just set 0 to
750                  * disable it.
751                  */
752                 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
753
754                 ret = hns3vf_bind_ring_with_vector(hw, vec, false,
755                                                    HNS3_RING_TYPE_TX, i);
756                 if (ret) {
757                         PMD_INIT_LOG(ERR, "VF failed to unbind TX ring(%d) with "
758                                           "vector: %u, ret=%d", i, vec, ret);
759                         return ret;
760                 }
761
762                 ret = hns3vf_bind_ring_with_vector(hw, vec, false,
763                                                    HNS3_RING_TYPE_RX, i);
764                 if (ret) {
765                         PMD_INIT_LOG(ERR, "VF failed to unbind RX ring(%d) with "
766                                           "vector: %u, ret=%d", i, vec, ret);
767                         return ret;
768                 }
769         }
770
771         return 0;
772 }
773
774 static int
775 hns3vf_dev_configure(struct rte_eth_dev *dev)
776 {
777         struct hns3_adapter *hns = dev->data->dev_private;
778         struct hns3_hw *hw = &hns->hw;
779         struct rte_eth_conf *conf = &dev->data->dev_conf;
780         enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
781         uint16_t nb_rx_q = dev->data->nb_rx_queues;
782         uint16_t nb_tx_q = dev->data->nb_tx_queues;
783         struct rte_eth_rss_conf rss_conf;
784         uint32_t max_rx_pkt_len;
785         uint16_t mtu;
786         bool gro_en;
787         int ret;
788
789         hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
790
791         /*
792          * Some versions of the hardware network engine do not support
793          * individually enabling/disabling/resetting a Tx or Rx queue. These
794          * devices must enable/disable/reset Tx and Rx queues at the same time.
795          * When the number of Tx queues allocated by upper applications is not
796          * equal to the number of Rx queues, the driver needs to set up fake Tx
797          * or Rx queues to equalize them; otherwise, the network engine cannot
798          * work as usual. These fake queues are invisible to, and cannot be
799          * used by, upper applications.
800          */
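        /*
         * Worked example (numbers are illustrative): on such hardware,
         * nb_rx_q = 8 and nb_tx_q = 4 leads to 4 fake Tx queues so that the
         * engine sees 8 Rx and 8 Tx queues; the fake queues carry no
         * application traffic.
         */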
801         if (!hns3_dev_indep_txrx_supported(hw)) {
802                 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
803                 if (ret) {
804                         hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.",
805                                  ret);
806                         return ret;
807                 }
808         }
809
810         hw->adapter_state = HNS3_NIC_CONFIGURING;
811         if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
812                 hns3_err(hw, "setting link speed/duplex not supported");
813                 ret = -EINVAL;
814                 goto cfg_err;
815         }
816
817         /* When RSS is not configured, packets are redirected to queue 0 */
818         if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
819                 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
820                 hw->rss_dis_flag = false;
821                 rss_conf = conf->rx_adv_conf.rss_conf;
822                 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
823                 if (ret)
824                         goto cfg_err;
825         }
826
827         /*
828          * If jumbo frames are enabled, MTU needs to be refreshed
829          * according to the maximum RX packet length.
830          */
831         if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
832                 max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
833                 if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
834                     max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
835                         hns3_err(hw, "maximum Rx packet length must be greater "
836                                  "than %u and no more than %u when jumbo frame is enabled.",
837                                  (uint16_t)HNS3_DEFAULT_FRAME_LEN,
838                                  (uint16_t)HNS3_MAX_FRAME_LEN);
839                         ret = -EINVAL;
840                         goto cfg_err;
841                 }
842
843                 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
844                 ret = hns3vf_dev_mtu_set(dev, mtu);
845                 if (ret)
846                         goto cfg_err;
847                 dev->data->mtu = mtu;
848         }
849
850         ret = hns3vf_dev_configure_vlan(dev);
851         if (ret)
852                 goto cfg_err;
853
854         /* config hardware GRO */
855         gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
856         ret = hns3_config_gro(hw, gro_en);
857         if (ret)
858                 goto cfg_err;
859
860         hns3_init_rx_ptype_tble(dev);
861
862         hw->adapter_state = HNS3_NIC_CONFIGURED;
863         return 0;
864
865 cfg_err:
866         (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
867         hw->adapter_state = HNS3_NIC_INITIALIZED;
868
869         return ret;
870 }
871
872 static int
873 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
874 {
875         int ret;
876
877         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
878                                 sizeof(mtu), true, NULL, 0);
879         if (ret)
880                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
881
882         return ret;
883 }
884
885 static int
886 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
887 {
888         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
889         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
890         int ret;
891
892         /*
893          * The hns3 PF/VF devices on the same port share the hardware MTU
894          * configuration. Currently, the hns3 VF PMD driver sends a mailbox
895          * message to ask the hns3 PF kernel ethdev driver to finish the
896          * hardware MTU configuration, so there is no need to stop the port for
897          * an hns3 VF device, and the MTU value issued by the hns3 VF PMD
898          * driver must be less than or equal to the PF's MTU.
899          */
900         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
901                 hns3_err(hw, "Failed to set mtu during resetting");
902                 return -EIO;
903         }
904
905         /*
906          * When Rx of scattered packets is off, the hns3 PMD driver may use the
907          * vector Rx process function or the simple Rx functions. If the input
908          * MTU is increased and the maximum length of received packets becomes
909          * greater than the length of a single Rx packet buffer, the hardware
910          * network engine needs multiple BDs and buffers to store these
911          * packets. This causes problems when the vector or simple Rx function
912          * is still used to receive packets. So, when Rx of scattered packets
913          * is off and the device is started, it is not permitted to increase
914          * the MTU such that the maximum length of Rx packets exceeds the Rx
915          * buffer length.
916          */
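        /*
         * Worked example (buffer size is illustrative): with rx_buf_len =
         * 2048, a started device without scattered Rx rejects any MTU for
         * which mtu + HNS3_ETH_OVERHEAD > 2048, since each packet must then
         * fit into a single Rx buffer.
         */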
917         if (dev->data->dev_started && !dev->data->scattered_rx &&
918             frame_size > hw->rx_buf_len) {
919                 hns3_err(hw, "failed to set mtu because the device is "
920                         "not in scattered rx mode");
921                 return -EOPNOTSUPP;
922         }
923
924         rte_spinlock_lock(&hw->lock);
925         ret = hns3vf_config_mtu(hw, mtu);
926         if (ret) {
927                 rte_spinlock_unlock(&hw->lock);
928                 return ret;
929         }
930         if (mtu > RTE_ETHER_MTU)
931                 dev->data->dev_conf.rxmode.offloads |=
932                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
933         else
934                 dev->data->dev_conf.rxmode.offloads &=
935                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
936         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
937         rte_spinlock_unlock(&hw->lock);
938
939         return 0;
940 }
941
942 static int
943 hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
944 {
945         struct hns3_adapter *hns = eth_dev->data->dev_private;
946         struct hns3_hw *hw = &hns->hw;
947         uint16_t q_num = hw->tqps_num;
948
949         /*
950          * In interrupt mode, 'max_rx_queues' is set based on the number of
951          * MSI-X interrupt resources of the hardware.
952          */
953         if (hw->data->dev_conf.intr_conf.rxq == 1)
954                 q_num = hw->intr_tqps_num;
955
956         info->max_rx_queues = q_num;
957         info->max_tx_queues = hw->tqps_num;
958         info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
959         info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
960         info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
961         info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
962         info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
963
964         info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
965                                  DEV_RX_OFFLOAD_UDP_CKSUM |
966                                  DEV_RX_OFFLOAD_TCP_CKSUM |
967                                  DEV_RX_OFFLOAD_SCTP_CKSUM |
968                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
969                                  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
970                                  DEV_RX_OFFLOAD_SCATTER |
971                                  DEV_RX_OFFLOAD_VLAN_STRIP |
972                                  DEV_RX_OFFLOAD_VLAN_FILTER |
973                                  DEV_RX_OFFLOAD_JUMBO_FRAME |
974                                  DEV_RX_OFFLOAD_RSS_HASH |
975                                  DEV_RX_OFFLOAD_TCP_LRO);
976         info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
977                                  DEV_TX_OFFLOAD_IPV4_CKSUM |
978                                  DEV_TX_OFFLOAD_TCP_CKSUM |
979                                  DEV_TX_OFFLOAD_UDP_CKSUM |
980                                  DEV_TX_OFFLOAD_SCTP_CKSUM |
981                                  DEV_TX_OFFLOAD_MULTI_SEGS |
982                                  DEV_TX_OFFLOAD_TCP_TSO |
983                                  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
984                                  DEV_TX_OFFLOAD_GRE_TNL_TSO |
985                                  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
986                                  DEV_TX_OFFLOAD_MBUF_FAST_FREE |
987                                  hns3_txvlan_cap_get(hw));
988
989         if (hns3_dev_outer_udp_cksum_supported(hw))
990                 info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
991
992         if (hns3_dev_indep_txrx_supported(hw))
993                 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
994                                  RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
995
996         info->rx_desc_lim = (struct rte_eth_desc_lim) {
997                 .nb_max = HNS3_MAX_RING_DESC,
998                 .nb_min = HNS3_MIN_RING_DESC,
999                 .nb_align = HNS3_ALIGN_RING_DESC,
1000         };
1001
1002         info->tx_desc_lim = (struct rte_eth_desc_lim) {
1003                 .nb_max = HNS3_MAX_RING_DESC,
1004                 .nb_min = HNS3_MIN_RING_DESC,
1005                 .nb_align = HNS3_ALIGN_RING_DESC,
1006                 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
1007                 .nb_mtu_seg_max = hw->max_non_tso_bd_num,
1008         };
1009
1010         info->default_rxconf = (struct rte_eth_rxconf) {
1011                 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
1012                 /*
1013                  * If there are no available Rx buffer descriptors, incoming
1014                  * packets are always dropped by hardware based on hns3 network
1015                  * engine.
1016                  */
1017                 .rx_drop_en = 1,
1018                 .offloads = 0,
1019         };
1020         info->default_txconf = (struct rte_eth_txconf) {
1021                 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
1022                 .offloads = 0,
1023         };
1024
1025         info->vmdq_queue_num = 0;
1026
1027         info->reta_size = hw->rss_ind_tbl_size;
1028         info->hash_key_size = HNS3_RSS_KEY_SIZE;
1029         info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
1030
1031         info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
1032         info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
1033         info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
1034         info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
1035         info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
1036         info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
1037
1038         return 0;
1039 }
1040
1041 static void
1042 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
1043 {
1044         hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
1045 }
1046
1047 static void
1048 hns3vf_disable_irq0(struct hns3_hw *hw)
1049 {
1050         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
1051 }
1052
1053 static void
1054 hns3vf_enable_irq0(struct hns3_hw *hw)
1055 {
1056         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
1057 }
1058
1059 static enum hns3vf_evt_cause
1060 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
1061 {
1062         struct hns3_hw *hw = &hns->hw;
1063         enum hns3vf_evt_cause ret;
1064         uint32_t cmdq_stat_reg;
1065         uint32_t rst_ing_reg;
1066         uint32_t val;
1067
1068         /* Fetch the events from their corresponding regs */
1069         cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
1070         if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
1071                 rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1072                 hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
1073                 hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
1074                 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1075                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1076                 hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
1077                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
1078                 if (clearval) {
1079                         hw->reset.stats.global_cnt++;
1080                         hns3_warn(hw, "Global reset detected, clear reset status");
1081                 } else {
1082                         hns3_schedule_delayed_reset(hns);
1083                         hns3_warn(hw, "Global reset detected, don't clear reset status");
1084                 }
1085
1086                 ret = HNS3VF_VECTOR0_EVENT_RST;
1087                 goto out;
1088         }
1089
1090         /* Check for vector0 mailbox(=CMDQ RX) event source */
1091         if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
1092                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
1093                 ret = HNS3VF_VECTOR0_EVENT_MBX;
1094                 goto out;
1095         }
1096
1097         val = 0;
1098         ret = HNS3VF_VECTOR0_EVENT_OTHER;
1099 out:
1100         if (clearval)
1101                 *clearval = val;
1102         return ret;
1103 }
1104
1105 static void
1106 hns3vf_interrupt_handler(void *param)
1107 {
1108         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1109         struct hns3_adapter *hns = dev->data->dev_private;
1110         struct hns3_hw *hw = &hns->hw;
1111         enum hns3vf_evt_cause event_cause;
1112         uint32_t clearval;
1113
1114         /* Disable interrupt */
1115         hns3vf_disable_irq0(hw);
1116
1117         /* Read out interrupt causes */
1118         event_cause = hns3vf_check_event_cause(hns, &clearval);
1119
1120         switch (event_cause) {
1121         case HNS3VF_VECTOR0_EVENT_RST:
1122                 hns3_schedule_reset(hns);
1123                 break;
1124         case HNS3VF_VECTOR0_EVENT_MBX:
1125                 hns3_dev_handle_mbx_msg(hw);
1126                 break;
1127         default:
1128                 break;
1129         }
1130
1131         /* Clear interrupt causes */
1132         hns3vf_clear_event_cause(hw, clearval);
1133
1134         /* Enable interrupt */
1135         hns3vf_enable_irq0(hw);
1136 }
1137
1138 static void
1139 hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
1140 {
1141         hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
1142         hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
1143         hw->rss_key_size = HNS3_RSS_KEY_SIZE;
1144         hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
1145 }
1146
1147 static void
1148 hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
1149 {
1150         struct hns3_dev_specs_0_cmd *req0;
1151
1152         req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
1153
1154         hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
1155         hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
1156         hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
1157         hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
1158 }
1159
1160 static int
1161 hns3vf_check_dev_specifications(struct hns3_hw *hw)
1162 {
1163         if (hw->rss_ind_tbl_size == 0 ||
1164             hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
1165                 hns3_warn(hw, "the size of the hash lookup table configured (%u)"
1166                               " is invalid, it must be in the range (0, %u]",
1167                               hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
1168                 return -EINVAL;
1169         }
1170
1171         return 0;
1172 }
1173
1174 static int
1175 hns3vf_query_dev_specifications(struct hns3_hw *hw)
1176 {
1177         struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
1178         int ret;
1179         int i;
1180
1181         for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1182                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
1183                                           true);
1184                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1185         }
1186         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
1187
1188         ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
1189         if (ret)
1190                 return ret;
1191
1192         hns3vf_parse_dev_specifications(hw, desc);
1193
1194         return hns3vf_check_dev_specifications(hw);
1195 }
1196
1197 void
1198 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
1199 {
1200         uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
1201                                    HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
1202         uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
1203         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1204
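        /*
         * The compare-exchange below only moves the state from UNKNOWN to the
         * PF-reported value, so once the capability has been latched it is not
         * overwritten by a late or repeated mailbox response.
         */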
1205         if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
1206                 __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
1207                                           __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
1208 }
1209
1210 static void
1211 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
1212 {
1213 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS      500
1214
1215         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1216         int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
1217         uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
1218         uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
1219         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1220
1221         __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
1222                          __ATOMIC_RELEASE);
1223
1224         (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1225                                 NULL, 0);
1226
1227         while (remain_ms > 0) {
1228                 rte_delay_ms(HNS3_POLL_RESPONE_MS);
1229                 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
1230                         HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
1231                         break;
1232                 remain_ms--;
1233         }
1234
1235         /*
1236          * On exit from the above loop, pf_push_lsc_cap can be in one of three
1237          * states: unknown (the PF has not acked), not_supported, or supported.
1238          * Here it is set to 'not_supported' if it is still 'unknown'.
1239          */
1240         __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
1241                                   __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
1242
1243         if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
1244                 HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
1245                 hns3_info(hw, "detect PF support push link status change!");
1246         } else {
1247                 /*
1248                  * The framework already set the RTE_ETH_DEV_INTR_LSC bit
1249                  * because the driver declared RTE_PCI_DRV_INTR_LSC in
1250                  * drv_flags, so clear the RTE_ETH_DEV_INTR_LSC capability here.
1251                  */
1252                 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1253         }
1254 }
1255
1256 static int
1257 hns3vf_get_capability(struct hns3_hw *hw)
1258 {
1259         struct rte_pci_device *pci_dev;
1260         struct rte_eth_dev *eth_dev;
1261         uint8_t revision;
1262         int ret;
1263
1264         eth_dev = &rte_eth_devices[hw->data->port_id];
1265         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1266
1267         /* Get PCI revision id */
1268         ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
1269                                   HNS3_PCI_REVISION_ID);
1270         if (ret != HNS3_PCI_REVISION_ID_LEN) {
1271                 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
1272                              ret);
1273                 return -EIO;
1274         }
1275         hw->revision = revision;
1276
1277         if (revision < PCI_REVISION_ID_HIP09_A) {
1278                 hns3vf_set_default_dev_specifications(hw);
1279                 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
1280                 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
1281                 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
1282                 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
1283                 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
1284                 hw->rss_info.ipv6_sctp_offload_supported = false;
1285                 hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
1286                 return 0;
1287         }
1288
1289         ret = hns3vf_query_dev_specifications(hw);
1290         if (ret) {
1291                 PMD_INIT_LOG(ERR,
1292                              "failed to query dev specifications, ret = %d",
1293                              ret);
1294                 return ret;
1295         }
1296
1297         hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
1298         hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
1299         hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
1300         hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
1301         hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
1302         hw->rss_info.ipv6_sctp_offload_supported = true;
1303         hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
1304
1305         return 0;
1306 }
1307
1308 static int
1309 hns3vf_check_tqp_info(struct hns3_hw *hw)
1310 {
1311         if (hw->tqps_num == 0) {
1312                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
1313                 return -EINVAL;
1314         }
1315
1316         if (hw->rss_size_max == 0) {
1317                 PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
1318                 return -EINVAL;
1319         }
1320
1321         hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
1322
1323         return 0;
1324 }
1325
1326 static int
1327 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
1328 {
1329         uint8_t resp_msg;
1330         int ret;
1331
1332         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
1333                                 HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0,
1334                                 true, &resp_msg, sizeof(resp_msg));
1335         if (ret) {
1336                 if (ret == -ETIME) {
1337                         /*
1338                          * Failing to get the current port based VLAN state
1339                          * from the PF driver does not affect the VF driver's
1340                          * basic function. Because the VF driver relies on the
1341                          * hns3 PF kernel ethdev driver, to avoid compatibility
1342                          * issues with older PF driver versions, no failure is
1343                          * returned when the return value is -ETIME. This
1344                          * return value covers the following scenarios:
1345                          * 1) the firmware didn't return the results in time
1346                          * 2) the result returned by the firmware timed out
1347                          * 3) an older version of the kernel side PF driver
1348                          *    does not support this mailbox message.
1349                          * For scenarios 1 and 2, it is most likely that a
1350                          * hardware error or a hardware reset has occurred; in
1351                          * that case, these errors will be caught by other
1352                          * functions.
1353                          */
1354                         PMD_INIT_LOG(WARNING,
1355                                 "failed to get PVID state for timeout, maybe "
1356                                 "kernel side PF driver doesn't support this "
1357                                 "mailbox message, or firmware didn't respond.");
1358                         resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
1359                 } else {
1360                         PMD_INIT_LOG(ERR, "failed to get port-based VLAN state,"
1361                                 " ret = %d", ret);
1362                         return ret;
1363                 }
1364         }
1365         hw->port_base_vlan_cfg.state = resp_msg ?
1366                 HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
1367         return 0;
1368 }
1369
1370 static int
1371 hns3vf_get_queue_info(struct hns3_hw *hw)
1372 {
1373 #define HNS3VF_TQPS_RSS_INFO_LEN        6
1374         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
1375         int ret;
1376
1377         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
1378                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
1379         if (ret) {
1380                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
1381                 return ret;
1382         }
1383
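             /*
              * Assumed layout of the 6-byte mailbox response, inferred from
              * the copies below: bytes 0-1 hold tqps_num and bytes 2-3 hold
              * rss_size_max; the remaining bytes are not consumed here.
              */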
1384         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
1385         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
1386
1387         return hns3vf_check_tqp_info(hw);
1388 }
1389
1390 static int
1391 hns3vf_get_queue_depth(struct hns3_hw *hw)
1392 {
1393 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
1394         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
1395         int ret;
1396
1397         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
1398                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
1399         if (ret) {
1400                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
1401                              ret);
1402                 return ret;
1403         }
1404
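             /*
              * Assumed layout of the 4-byte mailbox response, inferred from
              * the copies below: bytes 0-1 hold num_tx_desc and bytes 2-3
              * hold num_rx_desc.
              */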
1405         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
1406         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
1407
1408         return 0;
1409 }
1410
1411 static int
1412 hns3vf_get_tc_info(struct hns3_hw *hw)
1413 {
1414         uint8_t resp_msg;
1415         int ret;
1416         uint32_t i;
1417
1418         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
1419                                 true, &resp_msg, sizeof(resp_msg));
1420         if (ret) {
1421                 hns3_err(hw, "VF request to get TC info from PF failed, ret = %d",
1422                          ret);
1423                 return ret;
1424         }
1425
1426         hw->hw_tc_map = resp_msg;
1427
1428         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1429                 if (hw->hw_tc_map & BIT(i))
1430                         hw->num_tc++;
1431         }
1432
1433         return 0;
1434 }
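
     /*
      * The loop in hns3vf_get_tc_info() above is a population count of
      * hw_tc_map. Assuming hw->num_tc starts at zero and that compiler
      * builtins are acceptable, an equivalent sketch (illustrative only,
      * not part of the driver) would be:
      *
      *     hw->num_tc = __builtin_popcount(resp_msg);
      */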
1435
1436 static int
1437 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
1438 {
1439         uint8_t host_mac[RTE_ETHER_ADDR_LEN];
1440         int ret;
1441
1442         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
1443                                 true, host_mac, RTE_ETHER_ADDR_LEN);
1444         if (ret) {
1445                 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
1446                 return ret;
1447         }
1448
1449         memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
1450
1451         return 0;
1452 }
1453
1454 static int
1455 hns3vf_get_configuration(struct hns3_hw *hw)
1456 {
1457         int ret;
1458
1459         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
1460         hw->rss_dis_flag = false;
1461
1462         /* Get device capability */
1463         ret = hns3vf_get_capability(hw);
1464         if (ret) {
1465                 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
1466                 return ret;
1467         }
1468
1469         hns3vf_get_push_lsc_cap(hw);
1470
1471         /* Get queue configuration from PF */
1472         ret = hns3vf_get_queue_info(hw);
1473         if (ret)
1474                 return ret;
1475
1476         /* Get queue depth info from PF */
1477         ret = hns3vf_get_queue_depth(hw);
1478         if (ret)
1479                 return ret;
1480
1481         /* Get user defined VF MAC addr from PF */
1482         ret = hns3vf_get_host_mac_addr(hw);
1483         if (ret)
1484                 return ret;
1485
1486         ret = hns3vf_get_port_base_vlan_filter_state(hw);
1487         if (ret)
1488                 return ret;
1489
1490         /* Get tc configuration from PF */
1491         return hns3vf_get_tc_info(hw);
1492 }
1493
1494 static int
1495 hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
1496                             uint16_t nb_tx_q)
1497 {
1498         struct hns3_hw *hw = &hns->hw;
1499
1500         if (nb_rx_q < hw->num_tc) {
1501                 hns3_err(hw, "number of Rx queues(%u) is less than TCs(%u).",
1502                          nb_rx_q, hw->num_tc);
1503                 return -EINVAL;
1504         }
1505
1506         if (nb_tx_q < hw->num_tc) {
1507                 hns3_err(hw, "number of Tx queues(%u) is less than TCs(%u).",
1508                          nb_tx_q, hw->num_tc);
1509                 return -EINVAL;
1510         }
1511
1512         return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1513 }
1514
1515 static void
1516 hns3vf_request_link_info(struct hns3_hw *hw)
1517 {
1518         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1519         bool send_req;
1520         int ret;
1521
1522         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1523                 return;
1524
1525         send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
1526                    vf->req_link_info_cnt > 0;
1527         if (!send_req)
1528                 return;
1529
1530         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
1531                                 NULL, 0);
1532         if (ret) {
1533                 hns3_err(hw, "failed to fetch link status, ret = %d", ret);
1534                 return;
1535         }
1536
1537         if (vf->req_link_info_cnt > 0)
1538                 vf->req_link_info_cnt--;
1539 }
1540
1541 void
1542 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
1543                           uint32_t link_speed, uint8_t link_duplex)
1544 {
1545         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
1546         struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
1547         struct hns3_mac *mac = &hw->mac;
1548         int ret;
1549
1550         /*
1551          * The PF kernel driver may push the link status while the VF driver
1552          * is resetting; the driver stops the polling job in this case and
1553          * restarts it after the reset is done.
1554          * Once the polling job has started, the driver gets the initial link
1555          * status by sending a request to the PF kernel driver, and then keeps
1556          * it up to date by processing the PF's link status mailbox messages.
1557          */
1558         if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
1559                 return;
1560
1561         if (hw->adapter_state != HNS3_NIC_STARTED)
1562                 return;
1563
1564         mac->link_status = link_status;
1565         mac->link_speed = link_speed;
1566         mac->link_duplex = link_duplex;
1567         ret = hns3vf_dev_link_update(dev, 0);
1568         if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
1569                 hns3_start_report_lse(dev);
1570 }
1571
1572 static int
1573 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
1574 {
1575 #define HNS3VF_VLAN_MBX_MSG_LEN 5
1576         struct hns3_hw *hw = &hns->hw;
1577         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
1578         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1579         uint8_t is_kill = on ? 0 : 1;
1580
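             /*
              * Mailbox payload layout, as assembled below: byte 0 is the
              * is_kill flag, bytes 1-2 carry the VLAN id, and bytes 3-4
              * carry the protocol (the 802.1Q TPID in network byte order).
              */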
1581         msg_data[0] = is_kill;
1582         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1583         memcpy(&msg_data[3], &proto, sizeof(proto));
1584
1585         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1586                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1587                                  0);
1588 }
1589
1590 static int
1591 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1592 {
1593         struct hns3_adapter *hns = dev->data->dev_private;
1594         struct hns3_hw *hw = &hns->hw;
1595         int ret;
1596
1597         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1598                 hns3_err(hw,
1599                          "vf set vlan id failed during resetting, vlan_id = %u",
1600                          vlan_id);
1601                 return -EIO;
1602         }
1603         rte_spinlock_lock(&hw->lock);
1604         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1605         rte_spinlock_unlock(&hw->lock);
1606         if (ret)
1607                 hns3_err(hw, "vf set vlan id failed, vlan_id = %u, ret = %d",
1608                          vlan_id, ret);
1609
1610         return ret;
1611 }
1612
1613 static int
1614 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1615 {
1616         uint8_t msg_data;
1617         int ret;
1618
1619         msg_data = enable ? 1 : 0;
1620         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1621                                 &msg_data, sizeof(msg_data), false, NULL, 0);
1622         if (ret)
1623                 hns3_err(hw, "vf set vlan strip failed, ret = %d", ret);
1624
1625         return ret;
1626 }
1627
1628 static int
1629 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1630 {
1631         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1632         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1633         unsigned int tmp_mask;
1634         int ret = 0;
1635
1636         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1637                 hns3_err(hw, "vf set vlan offload failed during resetting, "
1638                              "mask = 0x%x", mask);
1639                 return -EIO;
1640         }
1641
1642         tmp_mask = (unsigned int)mask;
1643         /* Vlan stripping setting */
1644         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
1645                 rte_spinlock_lock(&hw->lock);
1646                 /* Enable or disable VLAN stripping */
1647                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1648                         ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1649                 else
1650                         ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1651                 rte_spinlock_unlock(&hw->lock);
1652         }
1653
1654         return ret;
1655 }
1656
1657 static int
1658 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1659 {
1660         struct rte_vlan_filter_conf *vfc;
1661         struct hns3_hw *hw = &hns->hw;
1662         uint16_t vlan_id;
1663         uint64_t vbit;
1664         uint64_t ids;
1665         int ret = 0;
1666         uint32_t i;
1667
1668         vfc = &hw->data->vlan_filter_conf;
1669         for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1670                 if (vfc->ids[i] == 0)
1671                         continue;
1672                 ids = vfc->ids[i];
1673                 while (ids) {
1674                         /*
1675                          * Each ids word is 64 bits wide; one bit corresponds
1676                          * to one VLAN id.
1677                          */
1678                         vlan_id = 64 * i;
1679                         /* mask of the zero bits below the lowest set bit */
1680                         vbit = ~ids & (ids - 1);
1681                         /* clear the lowest set bit of ids */
1682                         ids ^= (ids ^ (ids - 1)) ^ vbit;
1683                         for (; vbit;) {
1684                                 vbit >>= 1;
1685                                 vlan_id++;
1686                         }
1687                         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1688                         if (ret) {
1689                                 hns3_err(hw,
1690                                          "VF handle vlan table failed, ret = %d, on = %d",
1691                                          ret, on);
1692                                 return ret;
1693                         }
1694                 }
1695         }
1696
1697         return ret;
1698 }
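
     /*
      * The bit manipulation in hns3vf_handle_all_vlan_table() walks the set
      * bits of each 64-bit word. A minimal equivalent sketch, assuming a
      * GCC/Clang count-trailing-zeroes builtin is acceptable (illustrative
      * only, not part of the driver):
      *
      *     while (ids) {
      *             vlan_id = 64 * i + __builtin_ctzll(ids);
      *             ids &= ids - 1;    // clear the lowest set bit
      *             ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
      *             if (ret)
      *                     return ret;
      *     }
      */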
1699
1700 static int
1701 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1702 {
1703         return hns3vf_handle_all_vlan_table(hns, 0);
1704 }
1705
1706 static int
1707 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1708 {
1709         struct hns3_hw *hw = &hns->hw;
1710         struct rte_eth_conf *dev_conf;
1711         bool en;
1712         int ret;
1713
1714         dev_conf = &hw->data->dev_conf;
1715         en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
1716                                                                    : false;
1717         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1718         if (ret)
1719                 hns3_err(hw, "VF restore vlan conf failed, en = %d, ret = %d", en,
1720                          ret);
1721         return ret;
1722 }
1723
1724 static int
1725 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1726 {
1727         struct hns3_adapter *hns = dev->data->dev_private;
1728         struct rte_eth_dev_data *data = dev->data;
1729         struct hns3_hw *hw = &hns->hw;
1730         int ret;
1731
1732         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1733             data->dev_conf.txmode.hw_vlan_reject_untagged ||
1734             data->dev_conf.txmode.hw_vlan_insert_pvid) {
1735                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1736                               "and hw_vlan_insert_pvid are not supported!");
1737         }
1738
1739         /* Apply vlan offload setting */
1740         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1741         if (ret)
1742                 hns3_err(hw, "dev config vlan offload failed, ret = %d", ret);
1743
1744         return ret;
1745 }
1746
1747 static int
1748 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1749 {
1750         uint8_t msg_data;
1751
1752         msg_data = alive ? 1 : 0;
1753         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1754                                  sizeof(msg_data), false, NULL, 0);
1755 }
1756
1757 static void
1758 hns3vf_keep_alive_handler(void *param)
1759 {
1760         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1761         struct hns3_adapter *hns = eth_dev->data->dev_private;
1762         struct hns3_hw *hw = &hns->hw;
1763         int ret;
1764
1765         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1766                                 false, NULL, 0);
1767         if (ret)
1768                 hns3_err(hw, "VF failed to send keep-alive cmd, ret = %d",
1769                          ret);
1770
1771         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1772                           eth_dev);
1773 }
1774
1775 static void
1776 hns3vf_service_handler(void *param)
1777 {
1778         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1779         struct hns3_adapter *hns = eth_dev->data->dev_private;
1780         struct hns3_hw *hw = &hns->hw;
1781
1782         /*
1783          * The link status query and reset processing are executed in the
1784          * interrupt thread. When an IMP reset occurs, the IMP does not
1785          * respond, and the query operation will time out after 30ms. With
1786          * multiple PFs/VFs, the accumulated query timeouts can prevent the
1787          * IMP reset interrupt from being serviced within 100ms.
1788          * Therefore, before querying the link status, check whether a reset
1789          * is pending, and if so, abandon the query.
1790          */
1791         if (!hns3vf_is_reset_pending(hns))
1792                 hns3vf_request_link_info(hw);
1793         else
1794                 hns3_warn(hw, "Cancel the query when reset is pending");
1795
1796         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1797                           eth_dev);
1798 }
1799
1800 static void
1801 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1802 {
1803 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT      3
1804
1805         struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1806
1807         if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1808                 vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1809
1810         __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1811
1812         hns3vf_service_handler(dev);
1813 }
1814
1815 static void
1816 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1817 {
1818         struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1819
1820         rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1821
1822         __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1823 }
1824
1825 static int
1826 hns3_query_vf_resource(struct hns3_hw *hw)
1827 {
1828         struct hns3_vf_res_cmd *req;
1829         struct hns3_cmd_desc desc;
1830         uint16_t num_msi;
1831         int ret;
1832
1833         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1834         ret = hns3_cmd_send(hw, &desc, 1);
1835         if (ret) {
1836                 hns3_err(hw, "query vf resource failed, ret = %d", ret);
1837                 return ret;
1838         }
1839
1840         req = (struct hns3_vf_res_cmd *)desc.data;
1841         num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1842                                  HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1843         if (num_msi < HNS3_MIN_VECTOR_NUM) {
1844                 hns3_err(hw, "only %u MSI resources, not enough for VF (min: %d)",
1845                          num_msi, HNS3_MIN_VECTOR_NUM);
1846                 return -EINVAL;
1847         }
1848
1849         hw->num_msi = num_msi;
1850
1851         return 0;
1852 }
1853
1854 static int
1855 hns3vf_init_hardware(struct hns3_adapter *hns)
1856 {
1857         struct hns3_hw *hw = &hns->hw;
1858         uint16_t mtu = hw->data->mtu;
1859         int ret;
1860
1861         ret = hns3vf_set_promisc_mode(hw, true, false, false);
1862         if (ret)
1863                 return ret;
1864
1865         ret = hns3vf_config_mtu(hw, mtu);
1866         if (ret)
1867                 goto err_init_hardware;
1868
1869         ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1870         if (ret) {
1871                 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1872                 goto err_init_hardware;
1873         }
1874
1875         ret = hns3_config_gro(hw, false);
1876         if (ret) {
1877                 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1878                 goto err_init_hardware;
1879         }
1880
1881         /*
1882          * During initialization, all hardware mappings between queues and
1883          * interrupt vectors need to be cleared, so that errors caused by
1884          * residual configurations, such as unexpected interrupts, can be
1885          * avoided.
1886          */
1887         ret = hns3vf_init_ring_with_vector(hw);
1888         if (ret) {
1889                 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1890                 goto err_init_hardware;
1891         }
1892
1893         ret = hns3vf_set_alive(hw, true);
1894         if (ret) {
1895                 PMD_INIT_LOG(ERR, "VF failed to send alive to PF: %d", ret);
1896                 goto err_init_hardware;
1897         }
1898
1899         return 0;
1900
1901 err_init_hardware:
1902         (void)hns3vf_set_promisc_mode(hw, false, false, false);
1903         return ret;
1904 }
1905
1906 static int
1907 hns3vf_clear_vport_list(struct hns3_hw *hw)
1908 {
1909         return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
1910                                  HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
1911                                  NULL, 0);
1912 }
1913
1914 static int
1915 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1916 {
1917         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1918         struct hns3_adapter *hns = eth_dev->data->dev_private;
1919         struct hns3_hw *hw = &hns->hw;
1920         int ret;
1921
1922         PMD_INIT_FUNC_TRACE();
1923
1924         /* Get hardware io base address from pcie BAR2 IO space */
1925         hw->io_base = pci_dev->mem_resource[2].addr;
1926
1927         /* Firmware command queue initialize */
1928         ret = hns3_cmd_init_queue(hw);
1929         if (ret) {
1930                 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1931                 goto err_cmd_init_queue;
1932         }
1933
1934         /* Firmware command initialize */
1935         ret = hns3_cmd_init(hw);
1936         if (ret) {
1937                 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1938                 goto err_cmd_init;
1939         }
1940
1941         /* Get VF resource */
1942         ret = hns3_query_vf_resource(hw);
1943         if (ret)
1944                 goto err_cmd_init;
1945
1946         rte_spinlock_init(&hw->mbx_resp.lock);
1947
1948         hns3vf_clear_event_cause(hw, 0);
1949
1950         ret = rte_intr_callback_register(&pci_dev->intr_handle,
1951                                          hns3vf_interrupt_handler, eth_dev);
1952         if (ret) {
1953                 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1954                 goto err_intr_callback_register;
1955         }
1956
1957         /* Enable interrupt */
1958         rte_intr_enable(&pci_dev->intr_handle);
1959         hns3vf_enable_irq0(hw);
1960
1961         /* Get configuration from PF */
1962         ret = hns3vf_get_configuration(hw);
1963         if (ret) {
1964                 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1965                 goto err_get_config;
1966         }
1967
1968         ret = hns3_tqp_stats_init(hw);
1969         if (ret)
1970                 goto err_get_config;
1971
1972         /* Clear the hardware imissed statistics registers. */
1973         ret = hns3_update_imissed_stats(hw, true);
1974         if (ret) {
1975                 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1976                 goto err_set_tc_queue;
1977         }
1978
1979         ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
1980         if (ret) {
1981                 PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1982                 goto err_set_tc_queue;
1983         }
1984
1985         ret = hns3vf_clear_vport_list(hw);
1986         if (ret) {
1987                 PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1988                 goto err_set_tc_queue;
1989         }
1990
1991         ret = hns3vf_init_hardware(hns);
1992         if (ret)
1993                 goto err_set_tc_queue;
1994
1995         hns3_rss_set_default_args(hw);
1996
1997         return 0;
1998
1999 err_set_tc_queue:
2000         hns3_tqp_stats_uninit(hw);
2001
2002 err_get_config:
2003         hns3vf_disable_irq0(hw);
2004         rte_intr_disable(&pci_dev->intr_handle);
2005         hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
2006                              eth_dev);
2007 err_intr_callback_register:
2008 err_cmd_init:
2009         hns3_cmd_uninit(hw);
2010         hns3_cmd_destroy_queue(hw);
2011 err_cmd_init_queue:
2012         hw->io_base = NULL;
2013
2014         return ret;
2015 }
2016
2017 static void
2018 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
2019 {
2020         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2021         struct hns3_adapter *hns = eth_dev->data->dev_private;
2022         struct hns3_hw *hw = &hns->hw;
2023
2024         PMD_INIT_FUNC_TRACE();
2025
2026         hns3_rss_uninit(hns);
2027         (void)hns3_config_gro(hw, false);
2028         (void)hns3vf_set_alive(hw, false);
2029         (void)hns3vf_set_promisc_mode(hw, false, false, false);
2030         hns3_tqp_stats_uninit(hw);
2031         hns3vf_disable_irq0(hw);
2032         rte_intr_disable(&pci_dev->intr_handle);
2033         hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
2034                              eth_dev);
2035         hns3_cmd_uninit(hw);
2036         hns3_cmd_destroy_queue(hw);
2037         hw->io_base = NULL;
2038 }
2039
2040 static int
2041 hns3vf_do_stop(struct hns3_adapter *hns)
2042 {
2043         struct hns3_hw *hw = &hns->hw;
2044         int ret;
2045
2046         hw->mac.link_status = ETH_LINK_DOWN;
2047
2048         /*
2049          * The "hns3vf_do_stop" function is also called by .stop_service to
2050          * prepare for a reset. During a global or IMP reset, commands cannot
2051          * be sent to stop the Tx/Rx queues. The mbufs in the Tx/Rx queues may
2052          * be accessed during the reset process, so they cannot be released
2053          * during the reset and must instead be released after the reset is
2054          * completed.
2055          */
2056         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
2057                 hns3_dev_release_mbufs(hns);
2058
2059         if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
2060                 hns3vf_configure_mac_addr(hns, true);
2061                 ret = hns3_reset_all_tqps(hns);
2062                 if (ret) {
2063                         hns3_err(hw, "failed to reset all queues, ret = %d",
2064                                  ret);
2065                         return ret;
2066                 }
2067         }
2068         return 0;
2069 }
2070
2071 static void
2072 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
2073 {
2074         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2075         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2076         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2077         uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
2078         uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
2079         uint16_t q_id;
2080
2081         if (dev->data->dev_conf.intr_conf.rxq == 0)
2082                 return;
2083
2084         /* unmap the ring with vector */
2085         if (rte_intr_allow_others(intr_handle)) {
2086                 vec = RTE_INTR_VEC_RXTX_OFFSET;
2087                 base = RTE_INTR_VEC_RXTX_OFFSET;
2088         }
2089         if (rte_intr_dp_is_en(intr_handle)) {
2090                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2091                         (void)hns3vf_bind_ring_with_vector(hw, vec, false,
2092                                                            HNS3_RING_TYPE_RX,
2093                                                            q_id);
2094                         if (vec < base + intr_handle->nb_efd - 1)
2095                                 vec++;
2096                 }
2097         }
2098         /* Clean datapath event and queue/vec mapping */
2099         rte_intr_efd_disable(intr_handle);
2100         if (intr_handle->intr_vec) {
2101                 rte_free(intr_handle->intr_vec);
2102                 intr_handle->intr_vec = NULL;
2103         }
2104 }
2105
2106 static int
2107 hns3vf_dev_stop(struct rte_eth_dev *dev)
2108 {
2109         struct hns3_adapter *hns = dev->data->dev_private;
2110         struct hns3_hw *hw = &hns->hw;
2111
2112         PMD_INIT_FUNC_TRACE();
2113         dev->data->dev_started = 0;
2114
2115         hw->adapter_state = HNS3_NIC_STOPPING;
2116         hns3_set_rxtx_function(dev);
2117         rte_wmb();
2118         /* Disable datapath on secondary process. */
2119         hns3_mp_req_stop_rxtx(dev);
2120         /* Prevent crashes when queues are still in use. */
2121         rte_delay_ms(hw->tqps_num);
2122
2123         rte_spinlock_lock(&hw->lock);
2124         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
2125                 hns3_stop_tqps(hw);
2126                 hns3vf_do_stop(hns);
2127                 hns3vf_unmap_rx_interrupt(dev);
2128                 hw->adapter_state = HNS3_NIC_CONFIGURED;
2129         }
2130         hns3_rx_scattered_reset(dev);
2131         hns3vf_stop_poll_job(dev);
2132         hns3_stop_report_lse(dev);
2133         rte_spinlock_unlock(&hw->lock);
2134
2135         return 0;
2136 }
2137
2138 static int
2139 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
2140 {
2141         struct hns3_adapter *hns = eth_dev->data->dev_private;
2142         struct hns3_hw *hw = &hns->hw;
2143         int ret = 0;
2144
2145         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2146                 rte_free(eth_dev->process_private);
2147                 eth_dev->process_private = NULL;
2148                 return 0;
2149         }
2150
2151         if (hw->adapter_state == HNS3_NIC_STARTED)
2152                 ret = hns3vf_dev_stop(eth_dev);
2153
2154         hw->adapter_state = HNS3_NIC_CLOSING;
2155         hns3_reset_abort(hns);
2156         hw->adapter_state = HNS3_NIC_CLOSED;
2157         rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
2158         hns3vf_configure_all_mc_mac_addr(hns, true);
2159         hns3vf_remove_all_vlan_table(hns);
2160         hns3vf_uninit_vf(eth_dev);
2161         hns3_free_all_queues(eth_dev);
2162         rte_free(hw->reset.wait_data);
2163         rte_free(eth_dev->process_private);
2164         eth_dev->process_private = NULL;
2165         hns3_mp_uninit_primary();
2166         hns3_warn(hw, "Close port %u finished", hw->data->port_id);
2167
2168         return ret;
2169 }
2170
2171 static int
2172 hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2173                       size_t fw_size)
2174 {
2175         struct hns3_adapter *hns = eth_dev->data->dev_private;
2176         struct hns3_hw *hw = &hns->hw;
2177         uint32_t version = hw->fw_version;
2178         int ret;
2179
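             /*
              * The 32-bit firmware version is unpacked most significant byte
              * first into a dotted string; e.g. a (hypothetical) version of
              * 0x01020304 prints as "1.2.3.4".
              */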
2180         ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
2181                        hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
2182                                       HNS3_FW_VERSION_BYTE3_S),
2183                        hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
2184                                       HNS3_FW_VERSION_BYTE2_S),
2185                        hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
2186                                       HNS3_FW_VERSION_BYTE1_S),
2187                        hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
2188                                       HNS3_FW_VERSION_BYTE0_S));
2189         if (ret < 0)
2190                 return -EINVAL;
2191
2192         ret += 1; /* add the size of '\0' */
2193         if (fw_size < (size_t)ret)
2194                 return ret;
2195         else
2196                 return 0;
2197 }
2198
2199 static int
2200 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
2201                        __rte_unused int wait_to_complete)
2202 {
2203         struct hns3_adapter *hns = eth_dev->data->dev_private;
2204         struct hns3_hw *hw = &hns->hw;
2205         struct hns3_mac *mac = &hw->mac;
2206         struct rte_eth_link new_link;
2207
2208         memset(&new_link, 0, sizeof(new_link));
2209         switch (mac->link_speed) {
2210         case ETH_SPEED_NUM_10M:
2211         case ETH_SPEED_NUM_100M:
2212         case ETH_SPEED_NUM_1G:
2213         case ETH_SPEED_NUM_10G:
2214         case ETH_SPEED_NUM_25G:
2215         case ETH_SPEED_NUM_40G:
2216         case ETH_SPEED_NUM_50G:
2217         case ETH_SPEED_NUM_100G:
2218         case ETH_SPEED_NUM_200G:
2219                 new_link.link_speed = mac->link_speed;
2220                 break;
2221         default:
2222                 if (mac->link_status)
2223                         new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
2224                 else
2225                         new_link.link_speed = ETH_SPEED_NUM_NONE;
2226                 break;
2227         }
2228
2229         new_link.link_duplex = mac->link_duplex;
2230         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
2231         new_link.link_autoneg =
2232             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
2233
2234         return rte_eth_linkstatus_set(eth_dev, &new_link);
2235 }
2236
2237 static int
2238 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
2239 {
2240         struct hns3_hw *hw = &hns->hw;
2241         uint16_t nb_rx_q = hw->data->nb_rx_queues;
2242         uint16_t nb_tx_q = hw->data->nb_tx_queues;
2243         int ret;
2244
2245         ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
2246         if (ret)
2247                 return ret;
2248
2249         hns3_enable_rxd_adv_layout(hw);
2250
2251         ret = hns3_init_queues(hns, reset_queue);
2252         if (ret)
2253                 hns3_err(hw, "failed to init queues, ret = %d.", ret);
2254
2255         return ret;
2256 }
2257
2258 static int
2259 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
2260 {
2261         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2262         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2263         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2264         uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
2265         uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
2266         uint32_t intr_vector;
2267         uint16_t q_id;
2268         int ret;
2269
2270         /*
2271          * hns3 needs a dedicated interrupt to be used as the event
2272          * interrupt, which cannot be shared with the task queue pairs, so
2273          * the kernel drivers need to support multiple interrupt vectors.
2274          */
2275         if (dev->data->dev_conf.intr_conf.rxq == 0 ||
2276             !rte_intr_cap_multiple(intr_handle))
2277                 return 0;
2278
2279         rte_intr_disable(intr_handle);
2280         intr_vector = hw->used_rx_queues;
2281         /* It creates event fd for each intr vector when MSIX is used */
2282         if (rte_intr_efd_enable(intr_handle, intr_vector))
2283                 return -EINVAL;
2284
2285         if (intr_handle->intr_vec == NULL) {
2286                 intr_handle->intr_vec =
2287                         rte_zmalloc("intr_vec",
2288                                     hw->used_rx_queues * sizeof(int), 0);
2289                 if (intr_handle->intr_vec == NULL) {
2290                         hns3_err(hw, "Failed to allocate %u rx_queues"
2291                                      " intr_vec", hw->used_rx_queues);
2292                         ret = -ENOMEM;
2293                         goto vf_alloc_intr_vec_error;
2294                 }
2295         }
2296
2297         if (rte_intr_allow_others(intr_handle)) {
2298                 vec = RTE_INTR_VEC_RXTX_OFFSET;
2299                 base = RTE_INTR_VEC_RXTX_OFFSET;
2300         }
2301
2302         for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2303                 ret = hns3vf_bind_ring_with_vector(hw, vec, true,
2304                                                    HNS3_RING_TYPE_RX, q_id);
2305                 if (ret)
2306                         goto vf_bind_vector_error;
2307                 intr_handle->intr_vec[q_id] = vec;
2308                 /*
2309                  * If there are not enough efds (e.g. not enough interrupts),
2310                  * the remaining queues will be bound to the last interrupt.
2311                  */
2312                 if (vec < base + intr_handle->nb_efd - 1)
2313                         vec++;
2314         }
2315         rte_intr_enable(intr_handle);
2316         return 0;
2317
2318 vf_bind_vector_error:
2319         rte_free(intr_handle->intr_vec);
2320         intr_handle->intr_vec = NULL;
2321 vf_alloc_intr_vec_error:
2322         rte_intr_efd_disable(intr_handle);
2323         return ret;
2324 }
2325
2326 static int
2327 hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
2328 {
2329         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
2330         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2331         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2332         uint16_t q_id;
2333         int ret;
2334
2335         if (dev->data->dev_conf.intr_conf.rxq == 0)
2336                 return 0;
2337
2338         if (rte_intr_dp_is_en(intr_handle)) {
2339                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
2340                         ret = hns3vf_bind_ring_with_vector(hw,
2341                                         intr_handle->intr_vec[q_id], true,
2342                                         HNS3_RING_TYPE_RX, q_id);
2343                         if (ret)
2344                                 return ret;
2345                 }
2346         }
2347
2348         return 0;
2349 }
2350
2351 static void
2352 hns3vf_restore_filter(struct rte_eth_dev *dev)
2353 {
2354         hns3_restore_rss_filter(dev);
2355 }
2356
2357 static int
2358 hns3vf_dev_start(struct rte_eth_dev *dev)
2359 {
2360         struct hns3_adapter *hns = dev->data->dev_private;
2361         struct hns3_hw *hw = &hns->hw;
2362         int ret;
2363
2364         PMD_INIT_FUNC_TRACE();
2365         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
2366                 return -EBUSY;
2367
2368         rte_spinlock_lock(&hw->lock);
2369         hw->adapter_state = HNS3_NIC_STARTING;
2370         ret = hns3vf_do_start(hns, true);
2371         if (ret) {
2372                 hw->adapter_state = HNS3_NIC_CONFIGURED;
2373                 rte_spinlock_unlock(&hw->lock);
2374                 return ret;
2375         }
2376         ret = hns3vf_map_rx_interrupt(dev);
2377         if (ret)
2378                 goto map_rx_inter_err;
2379
2380         /*
2381          * Three registers are used to control the status of a TQP (a pair
2382          * of one Tx queue and one Rx queue) in the new version network
2383          * engine: one controls the enabling of the Tx queue, another
2384          * controls the enabling of the Rx queue, and the last is the
2385          * master switch controlling the enabling of the whole TQP. Both
2386          * the Tx register and the TQP register must be enabled to enable
2387          * a Tx queue, and the same applies to the Rx queue. For the older
2388          * network engine, this function only refreshes the enabled flag,
2389          * which is used to update the queue status in the DPDK framework.
2390          */
2391         ret = hns3_start_all_txqs(dev);
2392         if (ret)
2393                 goto map_rx_inter_err;
2394
2395         ret = hns3_start_all_rxqs(dev);
2396         if (ret)
2397                 goto start_all_rxqs_fail;
2398
2399         hw->adapter_state = HNS3_NIC_STARTED;
2400         rte_spinlock_unlock(&hw->lock);
2401
2402         hns3_rx_scattered_calc(dev);
2403         hns3_set_rxtx_function(dev);
2404         hns3_mp_req_start_rxtx(dev);
2405
2406         hns3vf_restore_filter(dev);
2407
2408         /* Enable interrupt of all rx queues before enabling queues */
2409         hns3_dev_all_rx_queue_intr_enable(hw, true);
2410         hns3_start_tqps(hw);
2411
2412         if (dev->data->dev_conf.intr_conf.lsc != 0)
2413                 hns3vf_dev_link_update(dev, 0);
2414         hns3vf_start_poll_job(dev);
2415
2416         return ret;
2417
2418 start_all_rxqs_fail:
2419         hns3_stop_all_txqs(dev);
2420 map_rx_inter_err:
2421         (void)hns3vf_do_stop(hns);
2422         hw->adapter_state = HNS3_NIC_CONFIGURED;
2423         rte_spinlock_unlock(&hw->lock);
2424
2425         return ret;
2426 }
2427
2428 static bool
2429 is_vf_reset_done(struct hns3_hw *hw)
2430 {
2431 #define HNS3_FUN_RST_ING_BITS \
2432         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
2433          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
2434          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
2435          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
2436
2437         uint32_t val;
2438
2439         if (hw->reset.level == HNS3_VF_RESET) {
2440                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
2441                 if (val & HNS3_VF_RST_ING_BIT)
2442                         return false;
2443         } else {
2444                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
2445                 if (val & HNS3_FUN_RST_ING_BITS)
2446                         return false;
2447         }
2448         return true;
2449 }
2450
2451 bool
2452 hns3vf_is_reset_pending(struct hns3_adapter *hns)
2453 {
2454         struct hns3_hw *hw = &hns->hw;
2455         enum hns3_reset_level reset;
2456
2457         /*
2458          * According to the PCIe protocol, an FLR to a PF device resets the
2459          * PF state as well as the SR-IOV extended capability, including VF
2460          * Enable, which means that the VFs no longer exist.
2461          *
2462          * HNS3_VF_FULL_RESET means the PF device is in FLR reset. When the
2463          * PF is in the FLR stage, the register state of the VF device is
2464          * unreliable, so register state detection cannot be carried out. In
2465          * this case, we just ignore the register states and return false to
2466          * indicate that there are no other reset states for the driver.
2467          */
2468         if (hw->reset.level == HNS3_VF_FULL_RESET)
2469                 return false;
2470
2471         /* Check the registers to confirm whether there is reset pending */
2472         hns3vf_check_event_cause(hns, NULL);
2473         reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
2474         if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET &&
2475             hw->reset.level < reset) {
2476                 hns3_warn(hw, "High level reset %d is pending", reset);
2477                 return true;
2478         }
2479         return false;
2480 }
2481
2482 static int
2483 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
2484 {
2485         struct hns3_hw *hw = &hns->hw;
2486         struct hns3_wait_data *wait_data = hw->reset.wait_data;
2487         struct timeval tv;
2488
2489         if (wait_data->result == HNS3_WAIT_SUCCESS) {
2490                 /*
2491                  * After the VF reset is ready, the PF may not yet have
2492                  * completed its reset processing; a VF mailbox sent to the
2493                  * PF may fail during the PF reset, so add an extra delay.
2494                  */
2495                 if (hw->reset.level == HNS3_VF_FUNC_RESET ||
2496                     hw->reset.level == HNS3_FLR_RESET)
2497                         return 0;
2498                 /* Reset retry process, no need to add extra delay. */
2499                 if (hw->reset.attempts)
2500                         return 0;
2501                 if (wait_data->check_completion == NULL)
2502                         return 0;
2503
2504                 wait_data->check_completion = NULL;
2505                 wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
2506                 wait_data->count = 1;
2507                 wait_data->result = HNS3_WAIT_REQUEST;
2508                 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
2509                                   wait_data);
2510                 hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset to complete");
2511                 return -EAGAIN;
2512         } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
2513                 gettimeofday(&tv, NULL);
2514                 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
2515                           tv.tv_sec, tv.tv_usec);
2516                 return -ETIME;
2517         } else if (wait_data->result == HNS3_WAIT_REQUEST)
2518                 return -EAGAIN;
2519
2520         wait_data->hns = hns;
2521         wait_data->check_completion = is_vf_reset_done;
2522         wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
2523                                       HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
2524         wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
2525         wait_data->count = HNS3VF_RESET_WAIT_CNT;
2526         wait_data->result = HNS3_WAIT_REQUEST;
2527         rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
2528         return -EAGAIN;
2529 }
2530
2531 static int
2532 hns3vf_prepare_reset(struct hns3_adapter *hns)
2533 {
2534         struct hns3_hw *hw = &hns->hw;
2535         int ret;
2536
2537         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
2538                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
2539                                         0, true, NULL, 0);
2540                 if (ret)
2541                         return ret;
2542         }
2543         __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
2544
2545         return 0;
2546 }
2547
2548 static int
2549 hns3vf_stop_service(struct hns3_adapter *hns)
2550 {
2551         struct hns3_hw *hw = &hns->hw;
2552         struct rte_eth_dev *eth_dev;
2553
2554         eth_dev = &rte_eth_devices[hw->data->port_id];
2555         if (hw->adapter_state == HNS3_NIC_STARTED) {
2556                 /*
2557                  * Make sure to update the link status before stopping the
2558                  * polling job, since updating it depends on the job running.
2559                  */
2560                 hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
2561                                           hw->mac.link_duplex);
2562                 hns3vf_stop_poll_job(eth_dev);
2563         }
2564         hw->mac.link_status = ETH_LINK_DOWN;
2565
2566         hns3_set_rxtx_function(eth_dev);
2567         rte_wmb();
2568         /* Disable datapath on secondary process. */
2569         hns3_mp_req_stop_rxtx(eth_dev);
2570         rte_delay_ms(hw->tqps_num);
2571
2572         rte_spinlock_lock(&hw->lock);
2573         if (hw->adapter_state == HNS3_NIC_STARTED ||
2574             hw->adapter_state == HNS3_NIC_STOPPING) {
2575                 hns3_enable_all_queues(hw, false);
2576                 hns3vf_do_stop(hns);
2577                 hw->reset.mbuf_deferred_free = true;
2578         } else
2579                 hw->reset.mbuf_deferred_free = false;
2580
2581         /*
2582          * It is cumbersome for hardware to pick-and-choose entries for
2583          * deletion from table space. Hence, for a function reset, software
2584          * intervention is required to delete the entries.
2585          */
2586         if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
2587                 hns3vf_configure_all_mc_mac_addr(hns, true);
2588         rte_spinlock_unlock(&hw->lock);
2589
2590         return 0;
2591 }
2592
2593 static int
2594 hns3vf_start_service(struct hns3_adapter *hns)
2595 {
2596         struct hns3_hw *hw = &hns->hw;
2597         struct rte_eth_dev *eth_dev;
2598
2599         eth_dev = &rte_eth_devices[hw->data->port_id];
2600         hns3_set_rxtx_function(eth_dev);
2601         hns3_mp_req_start_rxtx(eth_dev);
2602         if (hw->adapter_state == HNS3_NIC_STARTED) {
2603                 hns3vf_start_poll_job(eth_dev);
2604
2605                 /* Enable interrupt of all rx queues before enabling queues */
2606                 hns3_dev_all_rx_queue_intr_enable(hw, true);
2607                 /*
2608                  * The enable state of each rxq and txq needs to be recovered
2609                  * after reset, so restore it before enabling all tqps.
2610                  */
2611                 hns3_restore_tqp_enable_state(hw);
2612                 /*
2613                  * When the initialization is finished, enable the queues to
2614                  * receive and transmit packets.
2615                  */
2616                 hns3_enable_all_queues(hw, true);
2617         }
2618
2619         return 0;
2620 }
2621
2622 static int
2623 hns3vf_check_default_mac_change(struct hns3_hw *hw)
2624 {
2625         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
2626         struct rte_ether_addr *hw_mac;
2627         int ret;
2628
2629         /*
2630          * The hns3 PF ethdev driver in the kernel supports setting the VF MAC
2631          * address on the host with the "ip link set ..." command. If the hns3
2632          * PF kernel ethdev driver sets the MAC address for a VF device after
2633          * that VF device has been initialized, the PF driver notifies the VF
2634          * driver to reset the VF device so the new MAC address takes effect
2635          * immediately. The hns3 VF PMD driver should check whether the MAC
2636          * address has been changed by the PF kernel ethdev driver; if so, the
2637          * VF driver should configure the hardware with the new MAC address in
2638          * the hardware-configuration recovery stage of the reset process.
2639          */
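             /*
              * For illustration (names and values are hypothetical), the
              * host-side command takes the form:
              *
              *     ip link set <pf-netdev> vf <vf-id> mac <mac-address>
              */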
2640         ret = hns3vf_get_host_mac_addr(hw);
2641         if (ret)
2642                 return ret;
2643
2644         hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
2645         ret = rte_is_zero_ether_addr(hw_mac);
2646         if (ret) {
2647                 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
2648         } else {
2649                 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
2650                 if (!ret) {
2651                         rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
2652                         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
2653                                               &hw->data->mac_addrs[0]);
2654                         hns3_warn(hw, "Default MAC address has been changed to:"
2655                                   " %s by the host PF kernel ethdev driver",
2656                                   mac_str);
2657                 }
2658         }
2659
2660         return 0;
2661 }
2662
2663 static int
2664 hns3vf_restore_conf(struct hns3_adapter *hns)
2665 {
2666         struct hns3_hw *hw = &hns->hw;
2667         int ret;
2668
2669         ret = hns3vf_check_default_mac_change(hw);
2670         if (ret)
2671                 return ret;
2672
2673         ret = hns3vf_configure_mac_addr(hns, false);
2674         if (ret)
2675                 return ret;
2676
2677         ret = hns3vf_configure_all_mc_mac_addr(hns, false);
2678         if (ret)
2679                 goto err_mc_mac;
2680
2681         ret = hns3vf_restore_promisc(hns);
2682         if (ret)
2683                 goto err_vlan_table;
2684
2685         ret = hns3vf_restore_vlan_conf(hns);
2686         if (ret)
2687                 goto err_vlan_table;
2688
2689         ret = hns3vf_get_port_base_vlan_filter_state(hw);
2690         if (ret)
2691                 goto err_vlan_table;
2692
2693         ret = hns3vf_restore_rx_interrupt(hw);
2694         if (ret)
2695                 goto err_vlan_table;
2696
2697         ret = hns3_restore_gro_conf(hw);
2698         if (ret)
2699                 goto err_vlan_table;
2700
2701         if (hw->adapter_state == HNS3_NIC_STARTED) {
2702                 ret = hns3vf_do_start(hns, false);
2703                 if (ret)
2704                         goto err_vlan_table;
2705                 hns3_info(hw, "hns3vf dev restarted successfully!");
2706         } else if (hw->adapter_state == HNS3_NIC_STOPPING)
2707                 hw->adapter_state = HNS3_NIC_CONFIGURED;
2708         return 0;
2709
2710 err_vlan_table:
2711         hns3vf_configure_all_mc_mac_addr(hns, true);
2712 err_mc_mac:
2713         hns3vf_configure_mac_addr(hns, true);
2714         return ret;
2715 }
2716
2717 static enum hns3_reset_level
2718 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2719 {
2720         enum hns3_reset_level reset_level;
2721
2722         /* return the highest priority reset level amongst all */
2723         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2724                 reset_level = HNS3_VF_RESET;
2725         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2726                 reset_level = HNS3_VF_FULL_RESET;
2727         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2728                 reset_level = HNS3_VF_PF_FUNC_RESET;
2729         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2730                 reset_level = HNS3_VF_FUNC_RESET;
2731         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2732                 reset_level = HNS3_FLR_RESET;
2733         else
2734                 reset_level = HNS3_NONE_RESET;
2735
2736         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2737                 return HNS3_NONE_RESET;
2738
2739         return reset_level;
2740 }
2741
2742 static void
2743 hns3vf_reset_service(void *param)
2744 {
2745         struct hns3_adapter *hns = (struct hns3_adapter *)param;
2746         struct hns3_hw *hw = &hns->hw;
2747         enum hns3_reset_level reset_level;
2748         struct timeval tv_delta;
2749         struct timeval tv_start;
2750         struct timeval tv;
2751         uint64_t msec;
2752
2753         /*
2754          * If the interrupt was not triggered within the delay time, it may
2755          * have been lost; it is necessary to handle the interrupt here to
2756          * recover from the error.
2757          */
2758         if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2759                             SCHEDULE_DEFERRED) {
2760                 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2761                                  __ATOMIC_RELAXED);
2762                 hns3_err(hw, "Handling interrupts in delayed tasks");
2763                 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2764                 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2765                 if (reset_level == HNS3_NONE_RESET) {
2766                         hns3_err(hw, "No reset level is set, try global reset");
2767                         hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2768                 }
2769         }
2770         __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2771
2772         /*
2773          * Hardware reset has been notified, we now have to poll & check if
2774          * hardware has actually completed the reset sequence.
2775          */
2776         reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2777         if (reset_level != HNS3_NONE_RESET) {
2778                 gettimeofday(&tv_start, NULL);
2779                 hns3_reset_process(hns, reset_level);
2780                 gettimeofday(&tv, NULL);
2781                 timersub(&tv, &tv_start, &tv_delta);
2782                 msec = tv_delta.tv_sec * MSEC_PER_SEC +
2783                        tv_delta.tv_usec / USEC_PER_MSEC;
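                     /*
                      * Note on the log format below: the "%.6ld" precision
                      * zero-pads tv_usec to six digits, so the timestamp
                      * prints as a correct decimal fraction (e.g. 42 us
                      * renders as ".000042" rather than ".42").
                      */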
2784                 if (msec > HNS3_RESET_PROCESS_MS)
2785                         hns3_err(hw, "%d handle long time delta %" PRIu64
2786                                  " ms time=%ld.%.6ld",
2787                                  hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2788         }
2789 }
2790
2791 static int
2792 hns3vf_reinit_dev(struct hns3_adapter *hns)
2793 {
2794         struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2795         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2796         struct hns3_hw *hw = &hns->hw;
2797         int ret;
2798
2799         if (hw->reset.level == HNS3_VF_FULL_RESET) {
2800                 rte_intr_disable(&pci_dev->intr_handle);
2801                 ret = hns3vf_set_bus_master(pci_dev, true);
2802                 if (ret < 0) {
2803                         hns3_err(hw, "failed to set pci bus master, ret = %d", ret);
2804                         return ret;
2805                 }
2806         }
2807
2808         /* Firmware command initialize */
2809         ret = hns3_cmd_init(hw);
2810         if (ret) {
2811                 hns3_err(hw, "Failed to init cmd: %d", ret);
2812                 return ret;
2813         }
2814
2815         if (hw->reset.level == HNS3_VF_FULL_RESET) {
2816                 /*
2817                  * UIO enables MSI-X by writing the PCIe configuration space,
2818                  * while vfio_pci enables MSI-X in rte_intr_enable.
2819                  */
                if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
                    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
                        if (hns3vf_enable_msix(pci_dev, true))
                                hns3_err(hw, "Failed to enable msix");
                }

                rte_intr_enable(&pci_dev->intr_handle);
        }

        ret = hns3_reset_all_tqps(hns);
        if (ret) {
                hns3_err(hw, "Failed to reset all queues: %d", ret);
                return ret;
        }

        ret = hns3vf_init_hardware(hns);
        if (ret) {
                hns3_err(hw, "Failed to init hardware: %d", ret);
                return ret;
        }

        return 0;
}

static const struct eth_dev_ops hns3vf_eth_dev_ops = {
        .dev_configure      = hns3vf_dev_configure,
        .dev_start          = hns3vf_dev_start,
        .dev_stop           = hns3vf_dev_stop,
        .dev_close          = hns3vf_dev_close,
        .mtu_set            = hns3vf_dev_mtu_set,
        .promiscuous_enable = hns3vf_dev_promiscuous_enable,
        .promiscuous_disable = hns3vf_dev_promiscuous_disable,
        .allmulticast_enable = hns3vf_dev_allmulticast_enable,
        .allmulticast_disable = hns3vf_dev_allmulticast_disable,
        .stats_get          = hns3_stats_get,
        .stats_reset        = hns3_stats_reset,
        .xstats_get         = hns3_dev_xstats_get,
        .xstats_get_names   = hns3_dev_xstats_get_names,
        .xstats_reset       = hns3_dev_xstats_reset,
        .xstats_get_by_id   = hns3_dev_xstats_get_by_id,
        .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
        .dev_infos_get      = hns3vf_dev_infos_get,
        .fw_version_get     = hns3vf_fw_version_get,
        .rx_queue_setup     = hns3_rx_queue_setup,
        .tx_queue_setup     = hns3_tx_queue_setup,
        .rx_queue_release   = hns3_dev_rx_queue_release,
        .tx_queue_release   = hns3_dev_tx_queue_release,
        .rx_queue_start     = hns3_dev_rx_queue_start,
        .rx_queue_stop      = hns3_dev_rx_queue_stop,
        .tx_queue_start     = hns3_dev_tx_queue_start,
        .tx_queue_stop      = hns3_dev_tx_queue_stop,
        .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
        .rxq_info_get       = hns3_rxq_info_get,
        .txq_info_get       = hns3_txq_info_get,
        .rx_burst_mode_get  = hns3_rx_burst_mode_get,
        .tx_burst_mode_get  = hns3_tx_burst_mode_get,
        .mac_addr_add       = hns3vf_add_mac_addr,
        .mac_addr_remove    = hns3vf_remove_mac_addr,
        .mac_addr_set       = hns3vf_set_default_mac_addr,
        .set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
        .link_update        = hns3vf_dev_link_update,
        .rss_hash_update    = hns3_dev_rss_hash_update,
        .rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
        .reta_update        = hns3_dev_rss_reta_update,
        .reta_query         = hns3_dev_rss_reta_query,
        .flow_ops_get       = hns3_dev_flow_ops_get,
        .vlan_filter_set    = hns3vf_vlan_filter_set,
        .vlan_offload_set   = hns3vf_vlan_offload_set,
        .get_reg            = hns3_get_regs,
        .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
        .tx_done_cleanup    = hns3_tx_done_cleanup,
};

static const struct hns3_reset_ops hns3vf_reset_ops = {
        .reset_service       = hns3vf_reset_service,
        .stop_service        = hns3vf_stop_service,
        .prepare_reset       = hns3vf_prepare_reset,
        .wait_hardware_ready = hns3vf_wait_hardware_ready,
        .reinit_dev          = hns3vf_reinit_dev,
        .restore_conf        = hns3vf_restore_conf,
        .start_service       = hns3vf_start_service,
};

static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        PMD_INIT_FUNC_TRACE();

        eth_dev->process_private = (struct hns3_process_private *)
            rte_zmalloc_socket("hns3_filter_list",
                               sizeof(struct hns3_process_private),
                               RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
        if (eth_dev->process_private == NULL) {
                PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
                return -ENOMEM;
        }

        hns3_flow_init(eth_dev);

        hns3_set_rxtx_function(eth_dev);
        eth_dev->dev_ops = &hns3vf_eth_dev_ops;
        eth_dev->rx_queue_count = hns3_rx_queue_count;
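        /*
         * A secondary process only sets up the multi-process channel and
         * then reuses the device state already initialized by the primary
         * process.
         */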
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                ret = hns3_mp_init_secondary();
                if (ret) {
                        PMD_INIT_LOG(ERR, "Failed to init for secondary "
                                          "process, ret = %d", ret);
                        goto err_mp_init_secondary;
                }

                hw->secondary_cnt++;
                return 0;
        }

        ret = hns3_mp_init_primary();
        if (ret) {
                PMD_INIT_LOG(ERR,
                             "Failed to init for primary process, ret = %d",
                             ret);
                goto err_mp_init_primary;
        }

        hw->adapter_state = HNS3_NIC_UNINITIALIZED;
        hns->is_vf = true;
        hw->data = eth_dev->data;
        hns3_parse_devargs(eth_dev);

        ret = hns3_reset_init(hw);
        if (ret)
                goto err_init_reset;
        hw->reset.ops = &hns3vf_reset_ops;

        ret = hns3vf_init_vf(eth_dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
                goto err_init_vf;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
                                               sizeof(struct rte_ether_addr) *
                                               HNS3_VF_UC_MACADDR_NUM, 0);
        if (eth_dev->data->mac_addrs == NULL) {
2968                 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
2969                              "to store MAC addresses",
2970                              sizeof(struct rte_ether_addr) *
2971                              HNS3_VF_UC_MACADDR_NUM);
                ret = -ENOMEM;
                goto err_rte_zmalloc;
        }

        /*
         * The hns3 PF kernel ethdev driver supports setting the VF MAC
         * address on the host with the "ip link set ..." command. To avoid
         * problems such as the hns3 VF PMD failing to receive and send
         * packets after the user configures the MAC address this way, the
         * VF PMD keeps the same MAC address strategy as the hns3 kernel
         * ethdev driver during initialization: if the user has configured
         * a MAC address for the VF device with the ip command, start with
         * it; otherwise start with a random MAC address.
         */
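        /*
         * For example (illustrative; standard iproute2 syntax, device name
         * and VF index are placeholders):
         *   ip link set <pf_netdev> vf <vf_id> mac 02:11:22:33:44:55
         * makes the VF PMD start with 02:11:22:33:44:55 instead of a random
         * address.
         */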
        if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr))
                rte_eth_random_addr(hw->mac.mac_addr);
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
                            &eth_dev->data->mac_addrs[0]);

        hw->adapter_state = HNS3_NIC_INITIALIZED;

        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
                            SCHEDULE_PENDING) {
                hns3_err(hw, "Reschedule reset service after dev_init");
                hns3_schedule_reset(hns);
        } else {
                /* IMP will wait for the ready flag before reset */
                hns3_notify_reset_ready(hw, false);
        }
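        /*
         * Periodically send keep-alive messages to the PF driver (every
         * HNS3VF_KEEP_ALIVE_INTERVAL microseconds).
         */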
        rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
                          eth_dev);
        return 0;

err_rte_zmalloc:
        hns3vf_uninit_vf(eth_dev);

err_init_vf:
        rte_free(hw->reset.wait_data);

err_init_reset:
        hns3_mp_uninit_primary();

err_mp_init_primary:
err_mp_init_secondary:
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->tx_descriptor_status = NULL;
        rte_free(eth_dev->process_private);
        eth_dev->process_private = NULL;

        return ret;
}

static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                rte_free(eth_dev->process_private);
                eth_dev->process_private = NULL;
                return 0;
        }

        if (hw->adapter_state < HNS3_NIC_CLOSING)
                hns3vf_dev_close(eth_dev);

        hw->adapter_state = HNS3_NIC_REMOVED;
        return 0;
}

static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                     struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct hns3_adapter),
                                             hns3vf_dev_init);
}

static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3vf_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
        { .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver rte_hns3vf_pmd = {
        .id_table = pci_id_hns3vf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_hns3vf_pci_probe,
        .remove = eth_hns3vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
                HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> ");
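/*
 * Example (illustrative; the PCI address is a placeholder) of passing the
 * devargs declared above on the EAL command line, assuming the macros expand
 * to "rx_func_hint", "tx_func_hint" and "dev_caps_mask":
 *   dpdk-testpmd -a 0000:7d:01.0,rx_func_hint=vec,tx_func_hint=common -- -i
 */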