505525eba6b46071d9a631c491841f86cec56e94
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <arpa/inet.h>
12 #include <linux/pci_regs.h>
13
14 #include <rte_alarm.h>
15 #include <rte_atomic.h>
16 #include <rte_bus_pci.h>
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_eal.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_ethdev_pci.h>
25 #include <rte_interrupts.h>
26 #include <rte_io.h>
27 #include <rte_log.h>
28 #include <rte_pci.h>
29 #include <rte_vfio.h>
30
31 #include "hns3_ethdev.h"
32 #include "hns3_logs.h"
33 #include "hns3_rxtx.h"
34 #include "hns3_regs.h"
35 #include "hns3_intr.h"
36 #include "hns3_dcb.h"
37 #include "hns3_mp.h"
38
/* Periods of the keep-alive and service timers, in microseconds. */
#define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
#define HNS3VF_SERVICE_INTERVAL         1000000 /* us */

/* Polling step and maximum poll count while waiting for a VF reset. */
#define HNS3VF_RESET_WAIT_MS    20
#define HNS3VF_RESET_WAIT_CNT   2000

/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT           0
#define HNS3_CORE_RESET_BIT             1
#define HNS3_IMP_RESET_BIT              2
#define HNS3_FUN_RST_ING_B              0

/* Event sources reported through the vector0 (misc) interrupt. */
enum hns3vf_evt_cause {
        HNS3VF_VECTOR0_EVENT_RST,   /* hardware reset pending */
        HNS3VF_VECTOR0_EVENT_MBX,   /* mailbox (CMDQ RX) message from PF */
        HNS3VF_VECTOR0_EVENT_OTHER,
};

/* Forward declarations for handlers referenced before their definition. */
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
                                                    uint64_t *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
61
62 /* set PCI bus mastering */
63 static void
64 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
65 {
66         uint16_t reg;
67
68         rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
69
70         if (op)
71                 /* set the master bit */
72                 reg |= PCI_COMMAND_MASTER;
73         else
74                 reg &= ~(PCI_COMMAND_MASTER);
75
76         rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
77 }
78
79 /**
80  * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
81  * @cap: the capability
82  *
83  * Return the address of the given capability within the PCI capability list.
84  */
85 static int
86 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
87 {
88 #define MAX_PCIE_CAPABILITY 48
89         uint16_t status;
90         uint8_t pos;
91         uint8_t id;
92         int ttl;
93
94         rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
95         if (!(status & PCI_STATUS_CAP_LIST))
96                 return 0;
97
98         ttl = MAX_PCIE_CAPABILITY;
99         rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
100         while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
101                 rte_pci_read_config(device, &id, sizeof(id),
102                                     (pos + PCI_CAP_LIST_ID));
103
104                 if (id == 0xFF)
105                         break;
106
107                 if (id == cap)
108                         return (int)pos;
109
110                 rte_pci_read_config(device, &pos, sizeof(pos),
111                                     (pos + PCI_CAP_LIST_NEXT));
112         }
113         return 0;
114 }
115
116 static int
117 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
118 {
119         uint16_t control;
120         int pos;
121
122         pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
123         if (pos) {
124                 rte_pci_read_config(device, &control, sizeof(control),
125                                     (pos + PCI_MSIX_FLAGS));
126                 if (op)
127                         control |= PCI_MSIX_FLAGS_ENABLE;
128                 else
129                         control &= ~PCI_MSIX_FLAGS_ENABLE;
130                 rte_pci_write_config(device, &control, sizeof(control),
131                                      (pos + PCI_MSIX_FLAGS));
132                 return 0;
133         }
134         return -ENXIO;
135 }
136
137 static int
138 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
139                     __attribute__ ((unused)) uint32_t idx,
140                     __attribute__ ((unused)) uint32_t pool)
141 {
142         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
143         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
144         int ret;
145
146         rte_spinlock_lock(&hw->lock);
147         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
148                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
149                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
150         rte_spinlock_unlock(&hw->lock);
151         if (ret) {
152                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
153                                       mac_addr);
154                 hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
155                          ret);
156         }
157
158         return ret;
159 }
160
161 static void
162 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
163 {
164         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
165         /* index will be checked by upper level rte interface */
166         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
167         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
168         int ret;
169
170         rte_spinlock_lock(&hw->lock);
171         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
172                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
173                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
174                                 NULL, 0);
175         rte_spinlock_unlock(&hw->lock);
176         if (ret) {
177                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
178                                       mac_addr);
179                 hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
180                          mac_str, ret);
181         }
182 }
183
184 static int
185 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
186                             struct rte_ether_addr *mac_addr)
187 {
188 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
189         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
190         struct rte_ether_addr *old_addr;
191         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
192         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193         int ret;
194
195         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
196                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
197                                       mac_addr);
198                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
199                          mac_str);
200                 return -EINVAL;
201         }
202
203         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
204         rte_spinlock_lock(&hw->lock);
205         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
206         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
207                RTE_ETHER_ADDR_LEN);
208
209         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
210                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
211                                 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
212         if (ret) {
213                 /*
214                  * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
215                  * driver. When user has configured a MAC address for VF device
216                  * by "ip link set ..." command based on the PF device, the hns3
217                  * PF kernel ethdev driver does not allow VF driver to request
218                  * reconfiguring a different default MAC address, and return
219                  * -EPREM to VF driver through mailbox.
220                  */
221                 if (ret == -EPERM) {
222                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
223                                               old_addr);
224                         hns3_warn(hw, "Has permanet mac addr(%s) for vf",
225                                   mac_str);
226                 } else {
227                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
228                                               mac_addr);
229                         hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
230                                  mac_str, ret);
231                 }
232         }
233
234         rte_ether_addr_copy(mac_addr,
235                             (struct rte_ether_addr *)hw->mac.mac_addr);
236         rte_spinlock_unlock(&hw->lock);
237
238         return ret;
239 }
240
241 static int
242 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
243 {
244         struct hns3_hw *hw = &hns->hw;
245         struct rte_ether_addr *addr;
246         enum hns3_mbx_mac_vlan_subcode opcode;
247         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
248         int ret = 0;
249         int i;
250
251         if (del)
252                 opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
253         else
254                 opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
255         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
256                 addr = &hw->data->mac_addrs[i];
257                 if (!rte_is_valid_assigned_ether_addr(addr))
258                         continue;
259                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
260                 hns3_dbg(hw, "rm mac addr: %s", mac_str);
261                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
262                                         addr->addr_bytes, RTE_ETHER_ADDR_LEN,
263                                         false, NULL, 0);
264                 if (ret) {
265                         hns3_err(hw, "Failed to remove mac addr for vf: %d",
266                                  ret);
267                         break;
268                 }
269         }
270         return ret;
271 }
272
273 static int
274 hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
275                        struct rte_ether_addr *mac_addr)
276 {
277         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
278         struct hns3_hw *hw = &hns->hw;
279         int ret;
280
281         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
282                                 HNS3_MBX_MAC_VLAN_MC_ADD,
283                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
284                                 NULL, 0);
285         if (ret) {
286                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
287                                       mac_addr);
288                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
289                          mac_str, ret);
290                 return ret;
291         }
292
293         return 0;
294 }
295
296 static int
297 hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
298                           struct rte_ether_addr *mac_addr)
299 {
300         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
301         struct hns3_hw *hw = &hns->hw;
302         int ret;
303
304         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
305                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
306                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
307                                 NULL, 0);
308         if (ret) {
309                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
310                                       mac_addr);
311                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
312                          mac_str, ret);
313                 return ret;
314         }
315
316         return 0;
317 }
318
319 static int
320 hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
321                             struct rte_ether_addr *mc_addr_set,
322                             uint32_t nb_mc_addr)
323 {
324         struct hns3_adapter *hns = dev->data->dev_private;
325         struct hns3_hw *hw = &hns->hw;
326         struct rte_ether_addr *addr;
327         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
328         int cur_addr_num;
329         int set_addr_num;
330         int num;
331         int ret;
332         int i;
333
334         if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
335                 hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
336                          "invalid. valid range: 0~%d",
337                          nb_mc_addr, HNS3_MC_MACADDR_NUM);
338                 return -EINVAL;
339         }
340
341         set_addr_num = (int)nb_mc_addr;
342         for (i = 0; i < set_addr_num; i++) {
343                 addr = &mc_addr_set[i];
344                 if (!rte_is_multicast_ether_addr(addr)) {
345                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
346                                               addr);
347                         hns3_err(hw,
348                                  "Failed to set mc mac addr, addr(%s) invalid.",
349                                  mac_str);
350                         return -EINVAL;
351                 }
352         }
353         rte_spinlock_lock(&hw->lock);
354         cur_addr_num = hw->mc_addrs_num;
355         for (i = 0; i < cur_addr_num; i++) {
356                 num = cur_addr_num - i - 1;
357                 addr = &hw->mc_addrs[num];
358                 ret = hns3vf_remove_mc_mac_addr(hns, addr);
359                 if (ret) {
360                         rte_spinlock_unlock(&hw->lock);
361                         return ret;
362                 }
363
364                 hw->mc_addrs_num--;
365         }
366
367         for (i = 0; i < set_addr_num; i++) {
368                 addr = &mc_addr_set[i];
369                 ret = hns3vf_add_mc_mac_addr(hns, addr);
370                 if (ret) {
371                         rte_spinlock_unlock(&hw->lock);
372                         return ret;
373                 }
374
375                 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
376                 hw->mc_addrs_num++;
377         }
378         rte_spinlock_unlock(&hw->lock);
379
380         return 0;
381 }
382
383 static int
384 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
385 {
386         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
387         struct hns3_hw *hw = &hns->hw;
388         struct rte_ether_addr *addr;
389         int err = 0;
390         int ret;
391         int i;
392
393         for (i = 0; i < hw->mc_addrs_num; i++) {
394                 addr = &hw->mc_addrs[i];
395                 if (!rte_is_multicast_ether_addr(addr))
396                         continue;
397                 if (del)
398                         ret = hns3vf_remove_mc_mac_addr(hns, addr);
399                 else
400                         ret = hns3vf_add_mc_mac_addr(hns, addr);
401                 if (ret) {
402                         err = ret;
403                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
404                                               addr);
405                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
406                                  del ? "Remove" : "Restore", mac_str, ret);
407                 }
408         }
409         return err;
410 }
411
412 static int
413 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
414 {
415         struct hns3_mbx_vf_to_pf_cmd *req;
416         struct hns3_cmd_desc desc;
417         int ret;
418
419         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
420
421         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
422         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
423         req->msg[1] = en_bc_pmc ? 1 : 0;
424
425         ret = hns3_cmd_send(hw, &desc, 1);
426         if (ret)
427                 hns3_err(hw, "Set promisc mode fail, status is %d", ret);
428
429         return ret;
430 }
431
/*
 * ethdev .dev_configure callback for the VF.
 *
 * Transitions adapter_state INITIALIZED -> CONFIGURING -> CONFIGURED.
 * On any failure after the fake queues were set up, the cfg_err path
 * tears the fake queues back down (0, 0) and returns the state to
 * HNS3_NIC_INITIALIZED, so the ordering of the checks below matters.
 */
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_rss_conf *rss_cfg = &hw->rss_info;
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
        uint16_t mtu;
        int ret;

        /*
         * Hardware does not support individually enable/disable/reset the Tx or
         * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
         * and Rx queues at the same time. When the numbers of Tx queues
         * allocated by upper applications are not equal to the numbers of Rx
         * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
         * of Tx/Rx queues. otherwise, network engine can not work as usual. But
         * these fake queues are imperceptible, and can not be used by upper
         * applications.
         */
        ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
        if (ret) {
                hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
                return ret;
        }

        hw->adapter_state = HNS3_NIC_CONFIGURING;
        /* Fixed link speed/duplex cannot be enforced from the VF side. */
        if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
                ret = -EINVAL;
                goto cfg_err;
        }

        /* When RSS is not configured, redirect the packet queue 0 */
        if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
                rss_conf = conf->rx_adv_conf.rss_conf;
                /* Fall back to the driver's stored key if none was supplied. */
                if (rss_conf.rss_key == NULL) {
                        rss_conf.rss_key = rss_cfg->key;
                        rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
                }

                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
                if (ret)
                        goto cfg_err;
        }

        /*
         * If jumbo frames are enabled, MTU needs to be refreshed
         * according to the maximum RX packet length.
         */
        if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                /*
                 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
                 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
                 * can safely assign to "uint16_t" type variable.
                 */
                mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
                ret = hns3vf_dev_mtu_set(dev, mtu);
                if (ret)
                        goto cfg_err;
                dev->data->mtu = mtu;
        }

        ret = hns3vf_dev_configure_vlan(dev);
        if (ret)
                goto cfg_err;

        hw->adapter_state = HNS3_NIC_CONFIGURED;
        return 0;

cfg_err:
        /* Roll back the fake queues and leave the state as INITIALIZED. */
        (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
        hw->adapter_state = HNS3_NIC_INITIALIZED;

        return ret;
}
511
/*
 * Request the PF to program the given MTU for this VF via a synchronous
 * mailbox message (the PF enforces the allowed range). Returns the mailbox
 * status code, 0 on success.
 */
static int
hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
{
        int ret;

        ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
                                sizeof(mtu), true, NULL, 0);
        if (ret)
                hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);

        return ret;
}
524
/*
 * ethdev .mtu_set callback for the VF.
 *
 * Rejects the change while the port is started or a reset is in flight.
 * On success, also keeps dev_conf.rxmode consistent with the new frame
 * size: toggles DEV_RX_OFFLOAD_JUMBO_FRAME and updates max_rx_pkt_len.
 */
static int
hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
        int ret;

        if (dev->data->dev_started) {
                hns3_err(hw, "Failed to set mtu, port %u must be stopped "
                         "before configuration", dev->data->port_id);
                return -EBUSY;
        }

        if (rte_atomic16_read(&hw->reset.resetting)) {
                hns3_err(hw, "Failed to set mtu during resetting");
                return -EIO;
        }

        /* hw->lock serializes the mailbox request and the rxmode update. */
        rte_spinlock_lock(&hw->lock);
        ret = hns3vf_config_mtu(hw, mtu);
        if (ret) {
                rte_spinlock_unlock(&hw->lock);
                return ret;
        }
        /* Keep the jumbo offload flag in sync with the new frame size. */
        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
560
/*
 * ethdev .dev_infos_get callback: report the VF's queue limits, offload
 * capabilities, descriptor limits and RSS parameters. Values come from
 * the configuration previously fetched from the PF (hw->tqps_num,
 * hw->rx_buf_len) and from driver-wide constants.
 */
static int
hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        info->max_rx_queues = hw->tqps_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
        info->min_rx_bufsize = hw->rx_buf_len;
        info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;

        info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
                                 DEV_RX_OFFLOAD_UDP_CKSUM |
                                 DEV_RX_OFFLOAD_TCP_CKSUM |
                                 DEV_RX_OFFLOAD_SCTP_CKSUM |
                                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
                                 DEV_RX_OFFLOAD_KEEP_CRC |
                                 DEV_RX_OFFLOAD_SCATTER |
                                 DEV_RX_OFFLOAD_VLAN_STRIP |
                                 DEV_RX_OFFLOAD_QINQ_STRIP |
                                 DEV_RX_OFFLOAD_VLAN_FILTER |
                                 DEV_RX_OFFLOAD_JUMBO_FRAME);
        info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 DEV_TX_OFFLOAD_IPV4_CKSUM |
                                 DEV_TX_OFFLOAD_TCP_CKSUM |
                                 DEV_TX_OFFLOAD_UDP_CKSUM |
                                 DEV_TX_OFFLOAD_SCTP_CKSUM |
                                 DEV_TX_OFFLOAD_VLAN_INSERT |
                                 DEV_TX_OFFLOAD_QINQ_INSERT |
                                 DEV_TX_OFFLOAD_MULTI_SEGS |
                                 info->tx_queue_offload_capa);

        /* Rx and Tx rings share the same descriptor count limits. */
        info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
        };

        info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
        };

        /* VMDq is not supported on the VF. */
        info->vmdq_queue_num = 0;

        info->reta_size = HNS3_RSS_IND_TBL_SIZE;
        info->hash_key_size = HNS3_RSS_KEY_SIZE;
        info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
        info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
        info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

        return 0;
}
619
/* Acknowledge handled vector0 events by writing the source register. */
static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
        hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}
625
/* Mask the misc (vector0) interrupt at the device level. */
static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
631
/* Unmask the misc (vector0) interrupt at the device level. */
static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
637
/*
 * Read the vector0 command-queue status register and classify the pending
 * event (reset, mailbox, or other).
 *
 * When @clearval is non-NULL, *clearval receives the bits the caller should
 * write back via hns3vf_clear_event_cause() to acknowledge the event; the
 * handled bit is masked out of the returned value. When @clearval is NULL
 * and a reset is detected, the reset is re-scheduled as a delayed reset
 * instead of being acknowledged here.
 */
static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause ret;
        uint32_t cmdq_stat_reg;
        uint32_t rst_ing_reg;
        uint32_t val;

        /* Fetch the events from their corresponding regs */
        cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);

        if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
                rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
                hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                /* Stop issuing commands while the reset is in progress. */
                rte_atomic16_set(&hw->reset.disable_cmd, 1);
                /* Signal the hardware that the VF has seen the reset. */
                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
                hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
                if (clearval) {
                        hw->reset.stats.global_cnt++;
                        hns3_warn(hw, "Global reset detected, clear reset status");
                } else {
                        hns3_schedule_delayed_reset(hns);
                        hns3_warn(hw, "Global reset detected, don't clear reset status");
                }

                ret = HNS3VF_VECTOR0_EVENT_RST;
                goto out;
        }

        /* Check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
                ret = HNS3VF_VECTOR0_EVENT_MBX;
                goto out;
        }

        /* Nothing recognized: no bits to acknowledge. */
        val = 0;
        ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
        if (clearval)
                *clearval = val;
        return ret;
}
684
/*
 * Misc (vector0) interrupt handler for the VF.
 *
 * Sequence matters: the interrupt is masked first, the cause is read and
 * dispatched (reset scheduling or mailbox processing), the handled cause
 * bits are acknowledged, and only then is the interrupt unmasked.
 */
static void
hns3vf_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause event_cause;
        uint32_t clearval;

        /* Record the interrupt thread's id on first entry. */
        if (hw->irq_thread_id == 0)
                hw->irq_thread_id = pthread_self();

        /* Disable interrupt */
        hns3vf_disable_irq0(hw);

        /* Read out interrupt causes */
        event_cause = hns3vf_check_event_cause(hns, &clearval);

        switch (event_cause) {
        case HNS3VF_VECTOR0_EVENT_RST:
                hns3_schedule_reset(hns);
                break;
        case HNS3VF_VECTOR0_EVENT_MBX:
                hns3_dev_handle_mbx_msg(hw);
                break;
        default:
                break;
        }

        /* Clear interrupt causes */
        hns3vf_clear_event_cause(hw, clearval);

        /* Enable interrupt */
        hns3vf_enable_irq0(hw);
}
720
721 static int
722 hns3vf_check_tqp_info(struct hns3_hw *hw)
723 {
724         uint16_t tqps_num;
725
726         tqps_num = hw->tqps_num;
727         if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
728                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
729                                   "range: 1~%d",
730                              tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
731                 return -EINVAL;
732         }
733
734         if (hw->rx_buf_len == 0)
735                 hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
736         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
737
738         return 0;
739 }
740
741 static int
742 hns3vf_get_queue_info(struct hns3_hw *hw)
743 {
744 #define HNS3VF_TQPS_RSS_INFO_LEN        6
745         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
746         int ret;
747
748         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
749                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
750         if (ret) {
751                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
752                 return ret;
753         }
754
755         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
756         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
757         memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
758
759         return hns3vf_check_tqp_info(hw);
760 }
761
762 static int
763 hns3vf_get_queue_depth(struct hns3_hw *hw)
764 {
765 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
766         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
767         int ret;
768
769         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
770                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
771         if (ret) {
772                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
773                              ret);
774                 return ret;
775         }
776
777         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
778         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
779
780         return 0;
781 }
782
783 static int
784 hns3vf_get_tc_info(struct hns3_hw *hw)
785 {
786         uint8_t resp_msg;
787         int ret;
788
789         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
790                                 true, &resp_msg, sizeof(resp_msg));
791         if (ret) {
792                 hns3_err(hw, "VF request to get TC info from PF failed %d",
793                          ret);
794                 return ret;
795         }
796
797         hw->hw_tc_map = resp_msg;
798
799         return 0;
800 }
801
802 static int
803 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
804 {
805         uint8_t host_mac[RTE_ETHER_ADDR_LEN];
806         int ret;
807
808         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
809                                 true, host_mac, RTE_ETHER_ADDR_LEN);
810         if (ret) {
811                 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
812                 return ret;
813         }
814
815         memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
816
817         return 0;
818 }
819
820 static int
821 hns3vf_get_configuration(struct hns3_hw *hw)
822 {
823         int ret;
824
825         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
826
827         /* Get queue configuration from PF */
828         ret = hns3vf_get_queue_info(hw);
829         if (ret)
830                 return ret;
831
832         /* Get queue depth info from PF */
833         ret = hns3vf_get_queue_depth(hw);
834         if (ret)
835                 return ret;
836
837         /* Get user defined VF MAC addr from PF */
838         ret = hns3vf_get_host_mac_addr(hw);
839         if (ret)
840                 return ret;
841
842         /* Get tc configuration from PF */
843         return hns3vf_get_tc_info(hw);
844 }
845
846 static int
847 hns3vf_set_tc_info(struct hns3_adapter *hns)
848 {
849         struct hns3_hw *hw = &hns->hw;
850         uint16_t nb_rx_q = hw->data->nb_rx_queues;
851         uint16_t nb_tx_q = hw->data->nb_tx_queues;
852         uint8_t i;
853
854         hw->num_tc = 0;
855         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
856                 if (hw->hw_tc_map & BIT(i))
857                         hw->num_tc++;
858
859         if (nb_rx_q < hw->num_tc) {
860                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
861                          nb_rx_q, hw->num_tc);
862                 return -EINVAL;
863         }
864
865         if (nb_tx_q < hw->num_tc) {
866                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
867                          nb_tx_q, hw->num_tc);
868                 return -EINVAL;
869         }
870
871         hns3_set_rss_size(hw, nb_rx_q);
872         hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
873
874         return 0;
875 }
876
877 static void
878 hns3vf_request_link_info(struct hns3_hw *hw)
879 {
880         uint8_t resp_msg;
881         int ret;
882
883         if (rte_atomic16_read(&hw->reset.resetting))
884                 return;
885         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
886                                 &resp_msg, sizeof(resp_msg));
887         if (ret)
888                 hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
889 }
890
891 static int
892 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
893 {
894 #define HNS3VF_VLAN_MBX_MSG_LEN 5
895         struct hns3_hw *hw = &hns->hw;
896         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
897         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
898         uint8_t is_kill = on ? 0 : 1;
899
900         msg_data[0] = is_kill;
901         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
902         memcpy(&msg_data[3], &proto, sizeof(proto));
903
904         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
905                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
906                                  0);
907 }
908
909 static int
910 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
911 {
912         struct hns3_adapter *hns = dev->data->dev_private;
913         struct hns3_hw *hw = &hns->hw;
914         int ret;
915
916         if (rte_atomic16_read(&hw->reset.resetting)) {
917                 hns3_err(hw,
918                          "vf set vlan id failed during resetting, vlan_id =%u",
919                          vlan_id);
920                 return -EIO;
921         }
922         rte_spinlock_lock(&hw->lock);
923         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
924         rte_spinlock_unlock(&hw->lock);
925         if (ret)
926                 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
927                          vlan_id, ret);
928
929         return ret;
930 }
931
932 static int
933 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
934 {
935         uint8_t msg_data;
936         int ret;
937
938         msg_data = enable ? 1 : 0;
939         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
940                                 &msg_data, sizeof(msg_data), false, NULL, 0);
941         if (ret)
942                 hns3_err(hw, "vf enable strip failed, ret =%d", ret);
943
944         return ret;
945 }
946
947 static int
948 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
949 {
950         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
951         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
952         unsigned int tmp_mask;
953
954         tmp_mask = (unsigned int)mask;
955         /* Vlan stripping setting */
956         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
957                 rte_spinlock_lock(&hw->lock);
958                 /* Enable or disable VLAN stripping */
959                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
960                         hns3vf_en_hw_strip_rxvtag(hw, true);
961                 else
962                         hns3vf_en_hw_strip_rxvtag(hw, false);
963                 rte_spinlock_unlock(&hw->lock);
964         }
965
966         return 0;
967 }
968
969 static int
970 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
971 {
972         struct rte_vlan_filter_conf *vfc;
973         struct hns3_hw *hw = &hns->hw;
974         uint16_t vlan_id;
975         uint64_t vbit;
976         uint64_t ids;
977         int ret = 0;
978         uint32_t i;
979
980         vfc = &hw->data->vlan_filter_conf;
981         for (i = 0; i < RTE_DIM(vfc->ids); i++) {
982                 if (vfc->ids[i] == 0)
983                         continue;
984                 ids = vfc->ids[i];
985                 while (ids) {
986                         /*
987                          * 64 means the num bits of ids, one bit corresponds to
988                          * one vlan id
989                          */
990                         vlan_id = 64 * i;
991                         /* count trailing zeroes */
992                         vbit = ~ids & (ids - 1);
993                         /* clear least significant bit set */
994                         ids ^= (ids ^ (ids - 1)) ^ vbit;
995                         for (; vbit;) {
996                                 vbit >>= 1;
997                                 vlan_id++;
998                         }
999                         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1000                         if (ret) {
1001                                 hns3_err(hw,
1002                                          "VF handle vlan table failed, ret =%d, on = %d",
1003                                          ret, on);
1004                                 return ret;
1005                         }
1006                 }
1007         }
1008
1009         return ret;
1010 }
1011
/* Delete every VLAN id currently recorded in the ethdev filter table. */
static int
hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
{
	return hns3vf_handle_all_vlan_table(hns, 0);
}
1017
1018 static int
1019 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1020 {
1021         struct hns3_hw *hw = &hns->hw;
1022         struct rte_eth_conf *dev_conf;
1023         bool en;
1024         int ret;
1025
1026         dev_conf = &hw->data->dev_conf;
1027         en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
1028                                                                    : false;
1029         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1030         if (ret)
1031                 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1032                          ret);
1033         return ret;
1034 }
1035
1036 static int
1037 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1038 {
1039         struct hns3_adapter *hns = dev->data->dev_private;
1040         struct rte_eth_dev_data *data = dev->data;
1041         struct hns3_hw *hw = &hns->hw;
1042         int ret;
1043
1044         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1045             data->dev_conf.txmode.hw_vlan_reject_untagged ||
1046             data->dev_conf.txmode.hw_vlan_insert_pvid) {
1047                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1048                               "or hw_vlan_insert_pvid is not support!");
1049         }
1050
1051         /* Apply vlan offload setting */
1052         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1053         if (ret)
1054                 hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);
1055
1056         return ret;
1057 }
1058
1059 static int
1060 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1061 {
1062         uint8_t msg_data;
1063
1064         msg_data = alive ? 1 : 0;
1065         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1066                                  sizeof(msg_data), false, NULL, 0);
1067 }
1068
1069 static void
1070 hns3vf_keep_alive_handler(void *param)
1071 {
1072         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1073         struct hns3_adapter *hns = eth_dev->data->dev_private;
1074         struct hns3_hw *hw = &hns->hw;
1075         uint8_t respmsg;
1076         int ret;
1077
1078         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1079                                 false, &respmsg, sizeof(uint8_t));
1080         if (ret)
1081                 hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
1082                          ret);
1083
1084         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1085                           eth_dev);
1086 }
1087
1088 static void
1089 hns3vf_service_handler(void *param)
1090 {
1091         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1092         struct hns3_adapter *hns = eth_dev->data->dev_private;
1093         struct hns3_hw *hw = &hns->hw;
1094
1095         /*
1096          * The query link status and reset processing are executed in the
1097          * interrupt thread.When the IMP reset occurs, IMP will not respond,
1098          * and the query operation will time out after 30ms. In the case of
1099          * multiple PF/VFs, each query failure timeout causes the IMP reset
1100          * interrupt to fail to respond within 100ms.
1101          * Before querying the link status, check whether there is a reset
1102          * pending, and if so, abandon the query.
1103          */
1104         if (!hns3vf_is_reset_pending(hns))
1105                 hns3vf_request_link_info(hw);
1106         else
1107                 hns3_warn(hw, "Cancel the query when reset is pending");
1108
1109         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1110                           eth_dev);
1111 }
1112
/*
 * One-time hardware setup done over the PF mailbox: promiscuous mode, MTU,
 * default VLAN filter, GRO and the "alive" notification. On any failure the
 * promiscuous setting is rolled back before returning. Step order matters;
 * each call depends on the PF having processed the previous one.
 */
static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t mtu = hw->data->mtu;
	int ret;

	/* Enabled first so it can be rolled back on the error path below. */
	ret = hns3vf_set_promisc_mode(hw, true);
	if (ret)
		return ret;

	ret = hns3vf_config_mtu(hw, mtu);
	if (ret)
		goto err_init_hardware;

	/* Enable VLAN id 0 so untagged/default traffic passes the filter. */
	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
		goto err_init_hardware;
	}

	/* GRO starts disabled; it may be enabled later via ethdev config. */
	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_init_hardware;
	}

	/* Tell the PF this VF is up so it keeps servicing our mailbox. */
	ret = hns3vf_set_alive(hw, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
		goto err_init_hardware;
	}

	hns3vf_request_link_info(hw);
	return 0;

err_init_hardware:
	(void)hns3vf_set_promisc_mode(hw, false);
	return ret;
}
1153
/* Ask the PF to clear this VF's entries from the vport table. */
static int
hns3vf_clear_vport_list(struct hns3_hw *hw)
{
	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
				 NULL, 0);
}
1161
/*
 * One-shot VF initialization: map BAR2, bring up the firmware command
 * queue, hook the misc interrupt, pull the configuration from the PF and
 * initialize the hardware. On failure everything already set up is unwound
 * through the layered error labels, in reverse order of acquisition.
 */
static int
hns3vf_init_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	rte_spinlock_init(&hw->mbx_resp.lock);

	/* Clear stale vector0 events before enabling the interrupt below. */
	hns3vf_clear_event_cause(hw, 0);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 hns3vf_interrupt_handler, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	/* Enable interrupt */
	rte_intr_enable(&pci_dev->intr_handle);
	hns3vf_enable_irq0(hw);

	/* Get configuration from PF */
	ret = hns3vf_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	/*
	 * The hns3 PF ethdev driver in kernel support setting VF MAC address
	 * on the host by "ip link set ..." command. To avoid some incorrect
	 * scenes, for example, hns3 VF PMD driver fails to receive and send
	 * packets after user configure the MAC address by using the
	 * "ip link set ..." command, hns3 VF PMD driver keep the same MAC
	 * address strategy as the hns3 kernel ethdev driver in the
	 * initialization. If user configure a MAC address by the ip command
	 * for VF device, then hns3 VF PMD driver will start with it, otherwise
	 * start with a random MAC address in the initialization.
	 */
	ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
	if (ret)
		rte_eth_random_addr(hw->mac.mac_addr);

	/* Drop table entries left behind by a previous driver instance. */
	ret = hns3vf_clear_vport_list(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
		goto err_get_config;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret)
		goto err_get_config;

	hns3_set_default_rss_args(hw);

	return 0;

err_get_config:
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
	hns3_cmd_uninit(hw);

err_cmd_init:
	hns3_cmd_destroy_queue(hw);

err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
1256
/*
 * Reverse of hns3vf_init_vf(): release RSS state, notify the PF that the
 * VF is going away, mask IRQ0, unhook the interrupt callback and tear down
 * the firmware command queue.
 */
static void
hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_rss_uninit(hns);
	/* Best-effort notifications; the device is going down regardless. */
	(void)hns3vf_set_alive(hw, false);
	(void)hns3vf_set_promisc_mode(hw, false);
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
1277
1278 static int
1279 hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
1280                              bool mmap, uint16_t queue_id)
1281
1282 {
1283         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1284         struct hns3_vf_bind_vector_msg bind_msg;
1285         uint16_t code;
1286         int ret;
1287
1288         memset(&bind_msg, 0, sizeof(bind_msg));
1289         code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
1290                 HNS3_MBX_UNMAP_RING_TO_VECTOR;
1291         bind_msg.vector_id = vector_id;
1292         bind_msg.ring_num = 1;
1293         bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
1294         bind_msg.param[0].tqp_index = queue_id;
1295         bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
1296
1297         ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
1298                                 sizeof(bind_msg), false, NULL, 0);
1299         if (ret) {
1300                 hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
1301                          queue_id, vector_id, ret);
1302                 return ret;
1303         }
1304
1305         return 0;
1306 }
1307
1308 static int
1309 hns3vf_do_stop(struct hns3_adapter *hns)
1310 {
1311         struct hns3_hw *hw = &hns->hw;
1312         bool reset_queue;
1313
1314         hw->mac.link_status = ETH_LINK_DOWN;
1315
1316         if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
1317                 hns3vf_configure_mac_addr(hns, true);
1318                 reset_queue = true;
1319         } else
1320                 reset_queue = false;
1321         return hns3_stop_queues(hns, reset_queue);
1322 }
1323
/*
 * Undo hns3vf_map_rx_interrupt(): ask the PF to unbind each RX ring from
 * its MSI-X vector, then release the eventfds and the queue/vector map.
 * Vector assignment here must mirror the map path exactly.
 */
static void
hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint8_t base = 0;
	uint8_t vec = 0;
	uint16_t q_id;

	/* Nothing was mapped unless RX interrupts were requested. */
	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		/* Vector 0 stays reserved for misc interrupts. */
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			/* Best effort; failures are logged by the callee. */
			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
							   q_id);
			/* Remaining queues share the last vector. */
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
1357
/*
 * ethdev .dev_stop: quiesce the datapath on all processes, then release
 * queues and mbufs. The wmb/delay sequencing before taking the lock is
 * deliberate and must not be reordered.
 */
static void
hns3vf_dev_stop(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	/* Swap in the dummy burst functions before touching the queues. */
	hns3_set_rxtx_function(dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	/* If a reset is in flight, it owns the queue teardown. */
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		hns3vf_do_stop(hns);
		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
	rte_spinlock_unlock(&hw->lock);

	hns3vf_unmap_rx_interrupt(dev);
}
1385
/*
 * ethdev .dev_close: stop the port if running, abort any pending reset,
 * remove MC MAC and VLAN state from the PF, then free all driver resources.
 * Only the primary process performs the teardown.
 */
static void
hns3vf_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	if (hw->adapter_state == HNS3_NIC_STARTED)
		hns3vf_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	/* Abort the reset state machine before freeing what it may touch. */
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;
	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
	hns3vf_configure_all_mc_mac_addr(hns, true);
	hns3vf_remove_all_vlan_table(hns);
	hns3vf_uninit_vf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_mp_uninit_primary();
	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
1412
1413 static int
1414 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1415                        __rte_unused int wait_to_complete)
1416 {
1417         struct hns3_adapter *hns = eth_dev->data->dev_private;
1418         struct hns3_hw *hw = &hns->hw;
1419         struct hns3_mac *mac = &hw->mac;
1420         struct rte_eth_link new_link;
1421
1422         memset(&new_link, 0, sizeof(new_link));
1423         switch (mac->link_speed) {
1424         case ETH_SPEED_NUM_10M:
1425         case ETH_SPEED_NUM_100M:
1426         case ETH_SPEED_NUM_1G:
1427         case ETH_SPEED_NUM_10G:
1428         case ETH_SPEED_NUM_25G:
1429         case ETH_SPEED_NUM_40G:
1430         case ETH_SPEED_NUM_50G:
1431         case ETH_SPEED_NUM_100G:
1432                 new_link.link_speed = mac->link_speed;
1433                 break;
1434         default:
1435                 new_link.link_speed = ETH_SPEED_NUM_100M;
1436                 break;
1437         }
1438
1439         new_link.link_duplex = mac->link_duplex;
1440         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
1441         new_link.link_autoneg =
1442             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
1443
1444         return rte_eth_linkstatus_set(eth_dev, &new_link);
1445 }
1446
1447 static int
1448 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1449 {
1450         struct hns3_hw *hw = &hns->hw;
1451         int ret;
1452
1453         ret = hns3vf_set_tc_info(hns);
1454         if (ret)
1455                 return ret;
1456
1457         ret = hns3_start_queues(hns, reset_queue);
1458         if (ret) {
1459                 hns3_err(hw, "Failed to start queues: %d", ret);
1460                 return ret;
1461         }
1462
1463         return 0;
1464 }
1465
1466 static int
1467 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
1468 {
1469         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1470         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1471         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1472         uint32_t intr_vector;
1473         uint8_t base = 0;
1474         uint8_t vec = 0;
1475         uint16_t q_id;
1476         int ret;
1477
1478         if (dev->data->dev_conf.intr_conf.rxq == 0)
1479                 return 0;
1480
1481         /* disable uio/vfio intr/eventfd mapping */
1482         rte_intr_disable(intr_handle);
1483
1484         /* check and configure queue intr-vector mapping */
1485         if (rte_intr_cap_multiple(intr_handle) ||
1486             !RTE_ETH_DEV_SRIOV(dev).active) {
1487                 intr_vector = hw->used_rx_queues;
1488                 /* It creates event fd for each intr vector when MSIX is used */
1489                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1490                         return -EINVAL;
1491         }
1492         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1493                 intr_handle->intr_vec =
1494                         rte_zmalloc("intr_vec",
1495                                     hw->used_rx_queues * sizeof(int), 0);
1496                 if (intr_handle->intr_vec == NULL) {
1497                         hns3_err(hw, "Failed to allocate %d rx_queues"
1498                                      " intr_vec", hw->used_rx_queues);
1499                         ret = -ENOMEM;
1500                         goto vf_alloc_intr_vec_error;
1501                 }
1502         }
1503
1504         if (rte_intr_allow_others(intr_handle)) {
1505                 vec = RTE_INTR_VEC_RXTX_OFFSET;
1506                 base = RTE_INTR_VEC_RXTX_OFFSET;
1507         }
1508         if (rte_intr_dp_is_en(intr_handle)) {
1509                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1510                         ret = hns3vf_bind_ring_with_vector(dev, vec, true,
1511                                                            q_id);
1512                         if (ret)
1513                                 goto vf_bind_vector_error;
1514                         intr_handle->intr_vec[q_id] = vec;
1515                         if (vec < base + intr_handle->nb_efd - 1)
1516                                 vec++;
1517                 }
1518         }
1519         rte_intr_enable(intr_handle);
1520         return 0;
1521
1522 vf_bind_vector_error:
1523         rte_intr_efd_disable(intr_handle);
1524         if (intr_handle->intr_vec) {
1525                 free(intr_handle->intr_vec);
1526                 intr_handle->intr_vec = NULL;
1527         }
1528         return ret;
1529 vf_alloc_intr_vec_error:
1530         rte_intr_efd_disable(intr_handle);
1531         return ret;
1532 }
1533
/*
 * ethdev .dev_start: start the queues, wire up RX interrupts, switch in the
 * real burst functions on all processes and kick the periodic service. The
 * state transitions under hw->lock guard against concurrent resets.
 */
static int
hns3vf_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();
	/* A reset in progress owns the hardware; refuse to start now. */
	if (rte_atomic16_read(&hw->reset.resetting))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;
	ret = hns3vf_do_start(hns, true);
	if (ret) {
		hw->adapter_state = HNS3_NIC_CONFIGURED;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}
	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	ret = hns3vf_map_rx_interrupt(dev);
	if (ret)
		return ret;
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);
	/* Periodic link-status polling for the lifetime of the port. */
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);

	return ret;
}
1565
1566 static bool
1567 is_vf_reset_done(struct hns3_hw *hw)
1568 {
1569 #define HNS3_FUN_RST_ING_BITS \
1570         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1571          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1572          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1573          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1574
1575         uint32_t val;
1576
1577         if (hw->reset.level == HNS3_VF_RESET) {
1578                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1579                 if (val & HNS3_VF_RST_ING_BIT)
1580                         return false;
1581         } else {
1582                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1583                 if (val & HNS3_FUN_RST_ING_BITS)
1584                         return false;
1585         }
1586         return true;
1587 }
1588
1589 bool
1590 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1591 {
1592         struct hns3_hw *hw = &hns->hw;
1593         enum hns3_reset_level reset;
1594
1595         hns3vf_check_event_cause(hns, NULL);
1596         reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1597         if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
1598                 hns3_warn(hw, "High level reset %d is pending", reset);
1599                 return true;
1600         }
1601         return false;
1602 }
1603
/*
 * Alarm-driven wait for the reset-in-progress hardware bits to clear.
 * Returns 0 when hardware is ready, -EAGAIN while the wait is still
 * running (the alarm callback will advance it), and -ETIME on timeout.
 * First call arms the wait; later calls inspect wait_data->result.
 */
static int
hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS) {
		/*
		 * After vf reset is ready, the PF may not have completed
		 * the reset processing. The vf sending mbox to PF may fail
		 * during the pf reset, so it is better to add extra delay.
		 */
		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
		    hw->reset.level == HNS3_FLR_RESET)
			return 0;
		/* Reset retry process, no need to add extra delay. */
		if (hw->reset.attempts)
			return 0;
		/* check_completion already cleared: delay already done. */
		if (wait_data->check_completion == NULL)
			return 0;

		/* Arm a one-shot 1s delay before declaring success. */
		wait_data->check_completion = NULL;
		wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
		wait_data->count = 1;
		wait_data->result = HNS3_WAIT_REQUEST;
		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
				  wait_data);
		hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
		return -EAGAIN;
	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		gettimeofday(&tv, NULL);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	/* First invocation: arm the polling wait for the reset bits. */
	wait_data->hns = hns;
	wait_data->check_completion = is_vf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
				      HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3VF_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}
1652
1653 static int
1654 hns3vf_prepare_reset(struct hns3_adapter *hns)
1655 {
1656         struct hns3_hw *hw = &hns->hw;
1657         int ret = 0;
1658
1659         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1660                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1661                                         0, true, NULL, 0);
1662         }
1663         rte_atomic16_set(&hw->reset.disable_cmd, 1);
1664
1665         return ret;
1666 }
1667
/*
 * Reset-path counterpart of dev_stop: quiesce the datapath on all
 * processes, stop the queues and mark whether mbuf release must be
 * deferred to the restore stage. Ordering mirrors hns3vf_dev_stop().
 */
static int
hns3vf_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	if (hw->adapter_state == HNS3_NIC_STARTED)
		rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hw->mac.link_status = ETH_LINK_DOWN;

	/* Swap in dummy burst functions before touching the queues. */
	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	/* Wait until in-flight bursts have drained. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (hw->adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3vf_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries.
	 */
	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
		hns3vf_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
1704
1705 static int
1706 hns3vf_start_service(struct hns3_adapter *hns)
1707 {
1708         struct hns3_hw *hw = &hns->hw;
1709         struct rte_eth_dev *eth_dev;
1710
1711         eth_dev = &rte_eth_devices[hw->data->port_id];
1712         hns3_set_rxtx_function(eth_dev);
1713         hns3_mp_req_start_rxtx(eth_dev);
1714         if (hw->adapter_state == HNS3_NIC_STARTED)
1715                 hns3vf_service_handler(eth_dev);
1716
1717         return 0;
1718 }
1719
1720 static int
1721 hns3vf_check_default_mac_change(struct hns3_hw *hw)
1722 {
1723         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1724         struct rte_ether_addr *hw_mac;
1725         int ret;
1726
1727         /*
1728          * The hns3 PF ethdev driver in kernel support setting VF MAC address
1729          * on the host by "ip link set ..." command. If the hns3 PF kernel
1730          * ethdev driver sets the MAC address for VF device after the
1731          * initialization of the related VF device, the PF driver will notify
1732          * VF driver to reset VF device to make the new MAC address effective
1733          * immediately. The hns3 VF PMD driver should check whether the MAC
1734          * address has been changed by the PF kernel ethdev driver, if changed
1735          * VF driver should configure hardware using the new MAC address in the
1736          * recovering hardware configuration stage of the reset process.
1737          */
1738         ret = hns3vf_get_host_mac_addr(hw);
1739         if (ret)
1740                 return ret;
1741
1742         hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
1743         ret = rte_is_zero_ether_addr(hw_mac);
1744         if (ret) {
1745                 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
1746         } else {
1747                 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
1748                 if (!ret) {
1749                         rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
1750                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1751                                               &hw->data->mac_addrs[0]);
1752                         hns3_warn(hw, "Default MAC address has been changed to:"
1753                                   " %s by the host PF kernel ethdev driver",
1754                                   mac_str);
1755                 }
1756         }
1757
1758         return 0;
1759 }
1760
1761 static int
1762 hns3vf_restore_conf(struct hns3_adapter *hns)
1763 {
1764         struct hns3_hw *hw = &hns->hw;
1765         int ret;
1766
1767         ret = hns3vf_check_default_mac_change(hw);
1768         if (ret)
1769                 return ret;
1770
1771         ret = hns3vf_configure_mac_addr(hns, false);
1772         if (ret)
1773                 return ret;
1774
1775         ret = hns3vf_configure_all_mc_mac_addr(hns, false);
1776         if (ret)
1777                 goto err_mc_mac;
1778
1779         ret = hns3vf_restore_vlan_conf(hns);
1780         if (ret)
1781                 goto err_vlan_table;
1782
1783         if (hw->adapter_state == HNS3_NIC_STARTED) {
1784                 ret = hns3vf_do_start(hns, false);
1785                 if (ret)
1786                         goto err_vlan_table;
1787                 hns3_info(hw, "hns3vf dev restart successful!");
1788         } else if (hw->adapter_state == HNS3_NIC_STOPPING)
1789                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1790         return 0;
1791
1792 err_vlan_table:
1793         hns3vf_configure_all_mc_mac_addr(hns, true);
1794 err_mc_mac:
1795         hns3vf_configure_mac_addr(hns, true);
1796         return ret;
1797 }
1798
1799 static enum hns3_reset_level
1800 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
1801 {
1802         enum hns3_reset_level reset_level;
1803
1804         /* return the highest priority reset level amongst all */
1805         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
1806                 reset_level = HNS3_VF_RESET;
1807         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
1808                 reset_level = HNS3_VF_FULL_RESET;
1809         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
1810                 reset_level = HNS3_VF_PF_FUNC_RESET;
1811         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
1812                 reset_level = HNS3_VF_FUNC_RESET;
1813         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
1814                 reset_level = HNS3_FLR_RESET;
1815         else
1816                 reset_level = HNS3_NONE_RESET;
1817
1818         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
1819                 return HNS3_NONE_RESET;
1820
1821         return reset_level;
1822 }
1823
1824 static void
1825 hns3vf_reset_service(void *param)
1826 {
1827         struct hns3_adapter *hns = (struct hns3_adapter *)param;
1828         struct hns3_hw *hw = &hns->hw;
1829         enum hns3_reset_level reset_level;
1830         struct timeval tv_delta;
1831         struct timeval tv_start;
1832         struct timeval tv;
1833         uint64_t msec;
1834
1835         /*
1836          * The interrupt is not triggered within the delay time.
1837          * The interrupt may have been lost. It is necessary to handle
1838          * the interrupt to recover from the error.
1839          */
1840         if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
1841                 rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
1842                 hns3_err(hw, "Handling interrupts in delayed tasks");
1843                 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
1844                 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
1845                 if (reset_level == HNS3_NONE_RESET) {
1846                         hns3_err(hw, "No reset level is set, try global reset");
1847                         hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
1848                 }
1849         }
1850         rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
1851
1852         /*
1853          * Hardware reset has been notified, we now have to poll & check if
1854          * hardware has actually completed the reset sequence.
1855          */
1856         reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
1857         if (reset_level != HNS3_NONE_RESET) {
1858                 gettimeofday(&tv_start, NULL);
1859                 hns3_reset_process(hns, reset_level);
1860                 gettimeofday(&tv, NULL);
1861                 timersub(&tv, &tv_start, &tv_delta);
1862                 msec = tv_delta.tv_sec * MSEC_PER_SEC +
1863                        tv_delta.tv_usec / USEC_PER_MSEC;
1864                 if (msec > HNS3_RESET_PROCESS_MS)
1865                         hns3_err(hw, "%d handle long time delta %" PRIx64
1866                                  " ms time=%ld.%.6ld",
1867                                  hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
1868         }
1869 }
1870
/*
 * Re-initialize the VF after hardware completed its reset: restore PCI
 * bus mastering and MSI-X for a full VF reset, bring the firmware command
 * queue back up, reset all queues, and redo the base hardware setup.
 */
static int
hns3vf_reinit_dev(struct hns3_adapter *hns)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* A full VF reset clears PCI state; re-enable bus mastering first. */
	if (hw->reset.level == HNS3_VF_FULL_RESET) {
		rte_intr_disable(&pci_dev->intr_handle);
		hns3vf_set_bus_master(pci_dev, true);
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		hns3_err(hw, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	if (hw->reset.level == HNS3_VF_FULL_RESET) {
		/*
		 * UIO enables msix by writing the pcie configuration space
		 * vfio_pci enables msix in rte_intr_enable.
		 */
		if (pci_dev->kdrv == RTE_KDRV_IGB_UIO ||
		    pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) {
			if (hns3vf_enable_msix(pci_dev, true))
				hns3_err(hw, "Failed to enable msix");
		}

		rte_intr_enable(&pci_dev->intr_handle);
	}

	ret = hns3_reset_all_queues(hns);
	if (ret) {
		hns3_err(hw, "Failed to reset all queues: %d", ret);
		goto err_init;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret) {
		hns3_err(hw, "Failed to init hardware: %d", ret);
		goto err_init;
	}

	return 0;

	/*
	 * NOTE(review): on err_cmd_init, bus mastering is disabled even for
	 * reset levels where it was never enabled above, and control then
	 * falls through into err_init, which uninits the command queue whose
	 * init just failed - confirm both behaviors are intentional.
	 */
err_cmd_init:
	hns3vf_set_bus_master(pci_dev, false);
err_init:
	hns3_cmd_uninit(hw);
	return ret;
}
1925
/* ethdev callbacks exported by the hns3 VF PMD (installed in dev_init). */
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_start          = hns3vf_dev_start,
	.dev_stop           = hns3vf_dev_stop,
	.dev_close          = hns3vf_dev_close,
	.mtu_set            = hns3vf_dev_mtu_set,
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get      = hns3vf_dev_infos_get,
	.rx_queue_setup     = hns3_rx_queue_setup,
	.tx_queue_setup     = hns3_tx_queue_setup,
	.rx_queue_release   = hns3_dev_rx_queue_release,
	.tx_queue_release   = hns3_dev_tx_queue_release,
	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
	.dev_configure      = hns3vf_dev_configure,
	.mac_addr_add       = hns3vf_add_mac_addr,
	.mac_addr_remove    = hns3vf_remove_mac_addr,
	.mac_addr_set       = hns3vf_set_default_mac_addr,
	.set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
	.link_update        = hns3vf_dev_link_update,
	.rss_hash_update    = hns3_dev_rss_hash_update,
	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
	.reta_update        = hns3_dev_rss_reta_update,
	.reta_query         = hns3_dev_rss_reta_query,
	.filter_ctrl        = hns3_dev_filter_ctrl,
	.vlan_filter_set    = hns3vf_vlan_filter_set,
	.vlan_offload_set   = hns3vf_vlan_offload_set,
	.get_reg            = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
1961
/*
 * Hooks invoked by the shared hns3 reset state machine (hns3_reset_process)
 * at each stage of a VF reset; installed into hw->reset.ops in dev_init.
 */
static const struct hns3_reset_ops hns3vf_reset_ops = {
	.reset_service       = hns3vf_reset_service,
	.stop_service        = hns3vf_stop_service,
	.prepare_reset       = hns3vf_prepare_reset,
	.wait_hardware_ready = hns3vf_wait_hardware_ready,
	.reinit_dev          = hns3vf_reinit_dev,
	.restore_conf        = hns3vf_restore_conf,
	.start_service       = hns3vf_start_service,
};
1971
/*
 * ethdev init callback: allocate per-process state, hook up ops and burst
 * functions, and (primary process only) initialize reset handling, the VF
 * hardware, MAC address storage, and the keep-alive alarm. On failure the
 * error labels unwind in reverse order of acquisition.
 */
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Per-process storage for the flow filter lists. */
	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * Secondary processes only attach to IPC and count
		 * themselves; hardware is owned by the primary process.
		 * NOTE(review): the hns3_mp_init_secondary() result is not
		 * checked - confirm it cannot fail in this version.
		 */
		hns3_mp_init_secondary();
		hw->secondary_cnt++;
		return 0;
	}

	hns3_mp_init_primary();

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;

	/* Reset bookkeeping must exist before any interrupt can use it. */
	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3vf_reset_ops;

	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* A reset requested while init was running was deferred; run it now. */
	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}
	/* Start the periodic keep-alive mailbox towards the PF. */
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;

	return ret;
}
2067
/*
 * ethdev uninit callback (primary process only): detach the burst/ops
 * pointers, close the device if it was not already closing, and mark the
 * adapter removed.
 */
static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns and may tear down the hardware. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	/* dev_close is idempotent with respect to adapter_state. */
	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3vf_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}
2090
/* PCI probe: allocate an hns3_adapter-sized ethdev and run dev_init on it. */
static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3vf_dev_init);
}
2099
/* PCI remove: tear down the ethdev created at probe via dev_uninit. */
static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}
2105
/* PCI device IDs claimed by this VF driver (NULL-terminated table). */
static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
2111
/* PCI driver descriptor registered with the DPDK PCI bus below. */
static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};
2118
/* Register the driver, its PCI ID table, and kernel-module dependencies. */
RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");