7b776ad13e42e8f6674a8b2f90911e5abf4a70b0
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <arpa/inet.h>
12 #include <linux/pci_regs.h>
13
14 #include <rte_alarm.h>
15 #include <rte_atomic.h>
16 #include <rte_bus_pci.h>
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_eal.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_ethdev_pci.h>
25 #include <rte_interrupts.h>
26 #include <rte_io.h>
27 #include <rte_log.h>
28 #include <rte_pci.h>
29 #include <rte_vfio.h>
30
31 #include "hns3_ethdev.h"
32 #include "hns3_logs.h"
33 #include "hns3_rxtx.h"
34 #include "hns3_regs.h"
35 #include "hns3_intr.h"
36 #include "hns3_dcb.h"
37 #include "hns3_mp.h"
38
39 #define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
40 #define HNS3VF_SERVICE_INTERVAL         1000000 /* us */
41
42 #define HNS3VF_RESET_WAIT_MS    20
43 #define HNS3VF_RESET_WAIT_CNT   2000
44
45 /* Reset related Registers */
46 #define HNS3_GLOBAL_RESET_BIT           0
47 #define HNS3_CORE_RESET_BIT             1
48 #define HNS3_IMP_RESET_BIT              2
49 #define HNS3_FUN_RST_ING_B              0
50
/* Event causes decoded from the misc (vector0) interrupt status register. */
enum hns3vf_evt_cause {
        HNS3VF_VECTOR0_EVENT_RST,   /* a reset interrupt is pending */
        HNS3VF_VECTOR0_EVENT_MBX,   /* a mailbox (CMDQ RX) message is pending */
        HNS3VF_VECTOR0_EVENT_OTHER, /* no recognized event source */
};
56
57 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
58                                                     uint64_t *levels);
59 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
60 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
61
62 /* set PCI bus mastering */
63 static void
64 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
65 {
66         uint16_t reg;
67
68         rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
69
70         if (op)
71                 /* set the master bit */
72                 reg |= PCI_COMMAND_MASTER;
73         else
74                 reg &= ~(PCI_COMMAND_MASTER);
75
76         rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
77 }
78
79 /**
80  * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
81  * @cap: the capability
82  *
83  * Return the address of the given capability within the PCI capability list.
84  */
85 static int
86 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
87 {
88 #define MAX_PCIE_CAPABILITY 48
89         uint16_t status;
90         uint8_t pos;
91         uint8_t id;
92         int ttl;
93
94         rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
95         if (!(status & PCI_STATUS_CAP_LIST))
96                 return 0;
97
98         ttl = MAX_PCIE_CAPABILITY;
99         rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
100         while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
101                 rte_pci_read_config(device, &id, sizeof(id),
102                                     (pos + PCI_CAP_LIST_ID));
103
104                 if (id == 0xFF)
105                         break;
106
107                 if (id == cap)
108                         return (int)pos;
109
110                 rte_pci_read_config(device, &pos, sizeof(pos),
111                                     (pos + PCI_CAP_LIST_NEXT));
112         }
113         return 0;
114 }
115
116 static int
117 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
118 {
119         uint16_t control;
120         int pos;
121
122         pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
123         if (pos) {
124                 rte_pci_read_config(device, &control, sizeof(control),
125                                     (pos + PCI_MSIX_FLAGS));
126                 if (op)
127                         control |= PCI_MSIX_FLAGS_ENABLE;
128                 else
129                         control &= ~PCI_MSIX_FLAGS_ENABLE;
130                 rte_pci_write_config(device, &control, sizeof(control),
131                                      (pos + PCI_MSIX_FLAGS));
132                 return 0;
133         }
134         return -ENXIO;
135 }
136
137 static int
138 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
139                     __attribute__ ((unused)) uint32_t idx,
140                     __attribute__ ((unused)) uint32_t pool)
141 {
142         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
143         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
144         int ret;
145
146         rte_spinlock_lock(&hw->lock);
147         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
148                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
149                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
150         rte_spinlock_unlock(&hw->lock);
151         if (ret) {
152                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
153                                       mac_addr);
154                 hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
155                          ret);
156         }
157
158         return ret;
159 }
160
161 static void
162 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
163 {
164         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
165         /* index will be checked by upper level rte interface */
166         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
167         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
168         int ret;
169
170         rte_spinlock_lock(&hw->lock);
171         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
172                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
173                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
174                                 NULL, 0);
175         rte_spinlock_unlock(&hw->lock);
176         if (ret) {
177                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
178                                       mac_addr);
179                 hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
180                          mac_str, ret);
181         }
182 }
183
184 static int
185 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
186                             struct rte_ether_addr *mac_addr)
187 {
188 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
189         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
190         struct rte_ether_addr *old_addr;
191         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
192         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193         int ret;
194
195         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
196                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
197                                       mac_addr);
198                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
199                          mac_str);
200                 return -EINVAL;
201         }
202
203         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
204         rte_spinlock_lock(&hw->lock);
205         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
206         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
207                RTE_ETHER_ADDR_LEN);
208
209         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
210                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
211                                 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
212         if (ret) {
213                 /*
214                  * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
215                  * driver. When user has configured a MAC address for VF device
216                  * by "ip link set ..." command based on the PF device, the hns3
217                  * PF kernel ethdev driver does not allow VF driver to request
218                  * reconfiguring a different default MAC address, and return
219                  * -EPREM to VF driver through mailbox.
220                  */
221                 if (ret == -EPERM) {
222                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
223                                               old_addr);
224                         hns3_warn(hw, "Has permanet mac addr(%s) for vf",
225                                   mac_str);
226                 } else {
227                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
228                                               mac_addr);
229                         hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
230                                  mac_str, ret);
231                 }
232         }
233
234         rte_ether_addr_copy(mac_addr,
235                             (struct rte_ether_addr *)hw->mac.mac_addr);
236         rte_spinlock_unlock(&hw->lock);
237
238         return ret;
239 }
240
241 static int
242 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
243 {
244         struct hns3_hw *hw = &hns->hw;
245         struct rte_ether_addr *addr;
246         enum hns3_mbx_mac_vlan_subcode opcode;
247         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
248         int ret = 0;
249         int i;
250
251         if (del)
252                 opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
253         else
254                 opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
255         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
256                 addr = &hw->data->mac_addrs[i];
257                 if (!rte_is_valid_assigned_ether_addr(addr))
258                         continue;
259                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
260                 hns3_dbg(hw, "rm mac addr: %s", mac_str);
261                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
262                                         addr->addr_bytes, RTE_ETHER_ADDR_LEN,
263                                         false, NULL, 0);
264                 if (ret) {
265                         hns3_err(hw, "Failed to remove mac addr for vf: %d",
266                                  ret);
267                         break;
268                 }
269         }
270         return ret;
271 }
272
273 static int
274 hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
275                        struct rte_ether_addr *mac_addr)
276 {
277         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
278         struct hns3_hw *hw = &hns->hw;
279         int ret;
280
281         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
282                                 HNS3_MBX_MAC_VLAN_MC_ADD,
283                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
284                                 NULL, 0);
285         if (ret) {
286                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
287                                       mac_addr);
288                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
289                          mac_str, ret);
290                 return ret;
291         }
292
293         return 0;
294 }
295
296 static int
297 hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
298                           struct rte_ether_addr *mac_addr)
299 {
300         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
301         struct hns3_hw *hw = &hns->hw;
302         int ret;
303
304         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
305                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
306                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
307                                 NULL, 0);
308         if (ret) {
309                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
310                                       mac_addr);
311                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
312                          mac_str, ret);
313                 return ret;
314         }
315
316         return 0;
317 }
318
319 static int
320 hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
321                             struct rte_ether_addr *mc_addr_set,
322                             uint32_t nb_mc_addr)
323 {
324         struct hns3_adapter *hns = dev->data->dev_private;
325         struct hns3_hw *hw = &hns->hw;
326         struct rte_ether_addr *addr;
327         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
328         int cur_addr_num;
329         int set_addr_num;
330         int num;
331         int ret;
332         int i;
333
334         if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
335                 hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
336                          "invalid. valid range: 0~%d",
337                          nb_mc_addr, HNS3_MC_MACADDR_NUM);
338                 return -EINVAL;
339         }
340
341         set_addr_num = (int)nb_mc_addr;
342         for (i = 0; i < set_addr_num; i++) {
343                 addr = &mc_addr_set[i];
344                 if (!rte_is_multicast_ether_addr(addr)) {
345                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
346                                               addr);
347                         hns3_err(hw,
348                                  "Failed to set mc mac addr, addr(%s) invalid.",
349                                  mac_str);
350                         return -EINVAL;
351                 }
352         }
353         rte_spinlock_lock(&hw->lock);
354         cur_addr_num = hw->mc_addrs_num;
355         for (i = 0; i < cur_addr_num; i++) {
356                 num = cur_addr_num - i - 1;
357                 addr = &hw->mc_addrs[num];
358                 ret = hns3vf_remove_mc_mac_addr(hns, addr);
359                 if (ret) {
360                         rte_spinlock_unlock(&hw->lock);
361                         return ret;
362                 }
363
364                 hw->mc_addrs_num--;
365         }
366
367         for (i = 0; i < set_addr_num; i++) {
368                 addr = &mc_addr_set[i];
369                 ret = hns3vf_add_mc_mac_addr(hns, addr);
370                 if (ret) {
371                         rte_spinlock_unlock(&hw->lock);
372                         return ret;
373                 }
374
375                 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
376                 hw->mc_addrs_num++;
377         }
378         rte_spinlock_unlock(&hw->lock);
379
380         return 0;
381 }
382
383 static int
384 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
385 {
386         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
387         struct hns3_hw *hw = &hns->hw;
388         struct rte_ether_addr *addr;
389         int err = 0;
390         int ret;
391         int i;
392
393         for (i = 0; i < hw->mc_addrs_num; i++) {
394                 addr = &hw->mc_addrs[i];
395                 if (!rte_is_multicast_ether_addr(addr))
396                         continue;
397                 if (del)
398                         ret = hns3vf_remove_mc_mac_addr(hns, addr);
399                 else
400                         ret = hns3vf_add_mc_mac_addr(hns, addr);
401                 if (ret) {
402                         err = ret;
403                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
404                                               addr);
405                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
406                                  del ? "Remove" : "Restore", mac_str, ret);
407                 }
408         }
409         return err;
410 }
411
412 static int
413 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
414 {
415         struct hns3_mbx_vf_to_pf_cmd *req;
416         struct hns3_cmd_desc desc;
417         int ret;
418
419         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
420
421         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
422         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
423         req->msg[1] = en_bc_pmc ? 1 : 0;
424
425         ret = hns3_cmd_send(hw, &desc, 1);
426         if (ret)
427                 hns3_err(hw, "Set promisc mode fail, status is %d", ret);
428
429         return ret;
430 }
431
/*
 * Configure the VF device (rte_eth_dev configure callback).
 * Sets up fake Tx/Rx queues so Tx and Rx counts match, applies RSS, refreshes
 * the MTU when jumbo frames are enabled, and configures VLAN offloads.
 * Moves adapter_state to HNS3_NIC_CONFIGURED on success; on any failure the
 * fake queues are torn down and the state reverts to HNS3_NIC_INITIALIZED.
 * Returns 0 on success, negative errno on failure.
 */
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_rss_conf *rss_cfg = &hw->rss_info;
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
        uint16_t mtu;
        int ret;

        /*
         * Hardware does not support individually enable/disable/reset the Tx or
         * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
         * and Rx queues at the same time. When the numbers of Tx queues
         * allocated by upper applications are not equal to the numbers of Rx
         * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
         * of Tx/Rx queues. otherwise, network engine can not work as usual. But
         * these fake queues are imperceptible, and can not be used by upper
         * applications.
         */
        ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
        if (ret) {
                hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
                return ret;
        }

        hw->adapter_state = HNS3_NIC_CONFIGURING;
        /* A VF cannot force a fixed link speed/duplex. */
        if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
                ret = -EINVAL;
                goto cfg_err;
        }

        /* When RSS is not configured, redirect the packet queue 0 */
        if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
                rss_conf = conf->rx_adv_conf.rss_conf;
                /* Fall back to the driver's stored key when none was given. */
                if (rss_conf.rss_key == NULL) {
                        rss_conf.rss_key = rss_cfg->key;
                        rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
                }

                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
                if (ret)
                        goto cfg_err;
        }

        /*
         * If jumbo frames are enabled, MTU needs to be refreshed
         * according to the maximum RX packet length.
         */
        if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                /*
                 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
                 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
                 * can safely assign to "uint16_t" type variable.
                 */
                mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
                ret = hns3vf_dev_mtu_set(dev, mtu);
                if (ret)
                        goto cfg_err;
                dev->data->mtu = mtu;
        }

        ret = hns3vf_dev_configure_vlan(dev);
        if (ret)
                goto cfg_err;

        hw->adapter_state = HNS3_NIC_CONFIGURED;
        return 0;

cfg_err:
        /* Tear down the fake queues and revert to the initialized state. */
        (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
        hw->adapter_state = HNS3_NIC_INITIALIZED;

        return ret;
}
511
512 static int
513 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
514 {
515         int ret;
516
517         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
518                                 sizeof(mtu), true, NULL, 0);
519         if (ret)
520                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
521
522         return ret;
523 }
524
/*
 * Set the device MTU (rte_eth_dev mtu_set callback).
 * The port must be stopped and no reset may be in progress. On success the
 * jumbo-frame offload flag and max_rx_pkt_len are updated to stay consistent
 * with the new frame size.
 * Returns 0 on success, -EBUSY when started, -EIO during reset, or a
 * negative mailbox error.
 */
static int
hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
        int ret;

        if (dev->data->dev_started) {
                hns3_err(hw, "Failed to set mtu, port %u must be stopped "
                         "before configuration", dev->data->port_id);
                return -EBUSY;
        }

        /* The mailbox to the PF is unusable while a reset is running. */
        if (rte_atomic16_read(&hw->reset.resetting)) {
                hns3_err(hw, "Failed to set mtu during resetting");
                return -EIO;
        }

        rte_spinlock_lock(&hw->lock);
        ret = hns3vf_config_mtu(hw, mtu);
        if (ret) {
                rte_spinlock_unlock(&hw->lock);
                return ret;
        }
        /* Keep the jumbo-frame offload flag consistent with the new MTU. */
        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
560
/*
 * Report device capabilities and limits (rte_eth_dev infos_get callback):
 * queue counts, frame-size limits, supported Rx/Tx offloads, descriptor
 * limits and RSS parameters. Always returns 0.
 */
static int
hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
        struct hns3_adapter *hns = eth_dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        /* Queue and frame-size limits come from the PF-provided config. */
        info->max_rx_queues = hw->tqps_num;
        info->max_tx_queues = hw->tqps_num;
        info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
        info->min_rx_bufsize = hw->rx_buf_len;
        info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;

        /* Supported Rx offloads. */
        info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
                                 DEV_RX_OFFLOAD_UDP_CKSUM |
                                 DEV_RX_OFFLOAD_TCP_CKSUM |
                                 DEV_RX_OFFLOAD_SCTP_CKSUM |
                                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
                                 DEV_RX_OFFLOAD_KEEP_CRC |
                                 DEV_RX_OFFLOAD_SCATTER |
                                 DEV_RX_OFFLOAD_VLAN_STRIP |
                                 DEV_RX_OFFLOAD_QINQ_STRIP |
                                 DEV_RX_OFFLOAD_VLAN_FILTER |
                                 DEV_RX_OFFLOAD_JUMBO_FRAME);
        /* Supported Tx offloads (per-queue capa is folded into port capa). */
        info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                 DEV_TX_OFFLOAD_IPV4_CKSUM |
                                 DEV_TX_OFFLOAD_TCP_CKSUM |
                                 DEV_TX_OFFLOAD_UDP_CKSUM |
                                 DEV_TX_OFFLOAD_SCTP_CKSUM |
                                 DEV_TX_OFFLOAD_VLAN_INSERT |
                                 DEV_TX_OFFLOAD_QINQ_INSERT |
                                 DEV_TX_OFFLOAD_MULTI_SEGS |
                                 DEV_TX_OFFLOAD_TCP_TSO |
                                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
                                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
                                 info->tx_queue_offload_capa);

        info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
        };

        info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = HNS3_MAX_RING_DESC,
                .nb_min = HNS3_MIN_RING_DESC,
                .nb_align = HNS3_ALIGN_RING_DESC,
        };

        info->vmdq_queue_num = 0;

        /* RSS table/key geometry and supported hash types. */
        info->reta_size = HNS3_RSS_IND_TBL_SIZE;
        info->hash_key_size = HNS3_RSS_KEY_SIZE;
        info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
        info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
        info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

        return 0;
}
623
/* Acknowledge handled vector0 events by writing the clear mask back. */
static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
        hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}
629
/* Mask the misc (vector0) interrupt. */
static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
635
/* Unmask the misc (vector0) interrupt. */
static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
641
/*
 * Decode the pending vector0 event from the CMDQ status register.
 * For a reset event the command queue is disabled and the VF signals
 * hardware it is ready to be reset. When @clearval is non-NULL the bits to
 * write back for acknowledgement are stored in *@clearval; when it is NULL
 * the reset cannot be acknowledged here and is deferred via a delayed reset.
 */
static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause ret;
        uint32_t cmdq_stat_reg;
        uint32_t rst_ing_reg;
        uint32_t val;

        /* Fetch the events from their corresponding regs */
        cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);

        if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
                rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
                hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
                hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
                /* Stop using the command queue while the reset is pending. */
                rte_atomic16_set(&hw->reset.disable_cmd, 1);
                /* Tell hardware this VF is ready to be reset. */
                val = hns3_read_dev(hw, HNS3_VF_RST_ING);
                hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
                if (clearval) {
                        hw->reset.stats.global_cnt++;
                        hns3_warn(hw, "Global reset detected, clear reset status");
                } else {
                        /* Caller cannot acknowledge; handle the reset later. */
                        hns3_schedule_delayed_reset(hns);
                        hns3_warn(hw, "Global reset detected, don't clear reset status");
                }

                ret = HNS3VF_VECTOR0_EVENT_RST;
                goto out;
        }

        /* Check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
                val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
                ret = HNS3VF_VECTOR0_EVENT_MBX;
                goto out;
        }

        /* No recognized event source. */
        val = 0;
        ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
        if (clearval)
                *clearval = val;
        return ret;
}
688
/*
 * Misc (vector0) interrupt handler: dispatches reset and mailbox events.
 * The interrupt is masked for the duration of the handler and re-enabled
 * after the handled causes have been acknowledged.
 */
static void
hns3vf_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        enum hns3vf_evt_cause event_cause;
        uint32_t clearval;

        /* Record the interrupt thread so other code can tell it apart. */
        if (hw->irq_thread_id == 0)
                hw->irq_thread_id = pthread_self();

        /* Disable interrupt */
        hns3vf_disable_irq0(hw);

        /* Read out interrupt causes */
        event_cause = hns3vf_check_event_cause(hns, &clearval);

        switch (event_cause) {
        case HNS3VF_VECTOR0_EVENT_RST:
                hns3_schedule_reset(hns);
                break;
        case HNS3VF_VECTOR0_EVENT_MBX:
                hns3_dev_handle_mbx_msg(hw);
                break;
        default:
                break;
        }

        /* Clear interrupt causes */
        hns3vf_clear_event_cause(hw, clearval);

        /* Enable interrupt */
        hns3vf_enable_irq0(hw);
}
724
725 static int
726 hns3vf_check_tqp_info(struct hns3_hw *hw)
727 {
728         uint16_t tqps_num;
729
730         tqps_num = hw->tqps_num;
731         if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
732                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
733                                   "range: 1~%d",
734                              tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
735                 return -EINVAL;
736         }
737
738         if (hw->rx_buf_len == 0)
739                 hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
740         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
741
742         return 0;
743 }
744
745 static int
746 hns3vf_get_queue_info(struct hns3_hw *hw)
747 {
748 #define HNS3VF_TQPS_RSS_INFO_LEN        6
749         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
750         int ret;
751
752         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
753                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
754         if (ret) {
755                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
756                 return ret;
757         }
758
759         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
760         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
761         memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
762
763         return hns3vf_check_tqp_info(hw);
764 }
765
766 static int
767 hns3vf_get_queue_depth(struct hns3_hw *hw)
768 {
769 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
770         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
771         int ret;
772
773         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
774                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
775         if (ret) {
776                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
777                              ret);
778                 return ret;
779         }
780
781         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
782         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
783
784         return 0;
785 }
786
787 static int
788 hns3vf_get_tc_info(struct hns3_hw *hw)
789 {
790         uint8_t resp_msg;
791         int ret;
792
793         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
794                                 true, &resp_msg, sizeof(resp_msg));
795         if (ret) {
796                 hns3_err(hw, "VF request to get TC info from PF failed %d",
797                          ret);
798                 return ret;
799         }
800
801         hw->hw_tc_map = resp_msg;
802
803         return 0;
804 }
805
806 static int
807 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
808 {
809         uint8_t host_mac[RTE_ETHER_ADDR_LEN];
810         int ret;
811
812         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
813                                 true, host_mac, RTE_ETHER_ADDR_LEN);
814         if (ret) {
815                 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
816                 return ret;
817         }
818
819         memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
820
821         return 0;
822 }
823
824 static int
825 hns3vf_get_configuration(struct hns3_hw *hw)
826 {
827         int ret;
828
829         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
830
831         /* Get queue configuration from PF */
832         ret = hns3vf_get_queue_info(hw);
833         if (ret)
834                 return ret;
835
836         /* Get queue depth info from PF */
837         ret = hns3vf_get_queue_depth(hw);
838         if (ret)
839                 return ret;
840
841         /* Get user defined VF MAC addr from PF */
842         ret = hns3vf_get_host_mac_addr(hw);
843         if (ret)
844                 return ret;
845
846         /* Get tc configuration from PF */
847         return hns3vf_get_tc_info(hw);
848 }
849
850 static int
851 hns3vf_set_tc_info(struct hns3_adapter *hns)
852 {
853         struct hns3_hw *hw = &hns->hw;
854         uint16_t nb_rx_q = hw->data->nb_rx_queues;
855         uint16_t nb_tx_q = hw->data->nb_tx_queues;
856         uint8_t i;
857
858         hw->num_tc = 0;
859         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
860                 if (hw->hw_tc_map & BIT(i))
861                         hw->num_tc++;
862
863         if (nb_rx_q < hw->num_tc) {
864                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
865                          nb_rx_q, hw->num_tc);
866                 return -EINVAL;
867         }
868
869         if (nb_tx_q < hw->num_tc) {
870                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
871                          nb_tx_q, hw->num_tc);
872                 return -EINVAL;
873         }
874
875         hns3_set_rss_size(hw, nb_rx_q);
876         hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
877
878         return 0;
879 }
880
881 static void
882 hns3vf_request_link_info(struct hns3_hw *hw)
883 {
884         uint8_t resp_msg;
885         int ret;
886
887         if (rte_atomic16_read(&hw->reset.resetting))
888                 return;
889         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
890                                 &resp_msg, sizeof(resp_msg));
891         if (ret)
892                 hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
893 }
894
895 static int
896 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
897 {
898 #define HNS3VF_VLAN_MBX_MSG_LEN 5
899         struct hns3_hw *hw = &hns->hw;
900         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
901         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
902         uint8_t is_kill = on ? 0 : 1;
903
904         msg_data[0] = is_kill;
905         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
906         memcpy(&msg_data[3], &proto, sizeof(proto));
907
908         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
909                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
910                                  0);
911 }
912
913 static int
914 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
915 {
916         struct hns3_adapter *hns = dev->data->dev_private;
917         struct hns3_hw *hw = &hns->hw;
918         int ret;
919
920         if (rte_atomic16_read(&hw->reset.resetting)) {
921                 hns3_err(hw,
922                          "vf set vlan id failed during resetting, vlan_id =%u",
923                          vlan_id);
924                 return -EIO;
925         }
926         rte_spinlock_lock(&hw->lock);
927         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
928         rte_spinlock_unlock(&hw->lock);
929         if (ret)
930                 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
931                          vlan_id, ret);
932
933         return ret;
934 }
935
936 static int
937 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
938 {
939         uint8_t msg_data;
940         int ret;
941
942         msg_data = enable ? 1 : 0;
943         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
944                                 &msg_data, sizeof(msg_data), false, NULL, 0);
945         if (ret)
946                 hns3_err(hw, "vf enable strip failed, ret =%d", ret);
947
948         return ret;
949 }
950
951 static int
952 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
953 {
954         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
955         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
956         unsigned int tmp_mask;
957
958         tmp_mask = (unsigned int)mask;
959         /* Vlan stripping setting */
960         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
961                 rte_spinlock_lock(&hw->lock);
962                 /* Enable or disable VLAN stripping */
963                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
964                         hns3vf_en_hw_strip_rxvtag(hw, true);
965                 else
966                         hns3vf_en_hw_strip_rxvtag(hw, false);
967                 rte_spinlock_unlock(&hw->lock);
968         }
969
970         return 0;
971 }
972
/*
 * Walk the software VLAN filter bitmap (hw->data->vlan_filter_conf) and
 * program every set VLAN id into hardware (@on != 0) or remove it from
 * hardware (@on == 0). Stops and returns at the first mailbox failure.
 */
static int
hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
{
	struct rte_vlan_filter_conf *vfc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vlan_id;
	uint64_t vbit;
	uint64_t ids;
	int ret = 0;
	uint32_t i;

	vfc = &hw->data->vlan_filter_conf;
	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;
		ids = vfc->ids[i];
		while (ids) {
			/*
			 * 64 means the num bits of ids, one bit corresponds to
			 * one vlan id
			 */
			vlan_id = 64 * i;
			/* count trailing zeroes */
			vbit = ~ids & (ids - 1);
			/* clear least significant bit set */
			ids ^= (ids ^ (ids - 1)) ^ vbit;
			/*
			 * vbit is a contiguous mask of the trailing zero bits
			 * of ids; shifting it out counts them, yielding the
			 * bit index of the vlan id within this 64-bit word.
			 */
			for (; vbit;) {
				vbit >>= 1;
				vlan_id++;
			}
			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
			if (ret) {
				hns3_err(hw,
					 "VF handle vlan table failed, ret =%d, on = %d",
					 ret, on);
				return ret;
			}
		}
	}

	return ret;
}
1015
/* Delete every VLAN filter entry this VF installed (used on close/reset). */
static int
hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
{
	return hns3vf_handle_all_vlan_table(hns, 0);
}
1021
1022 static int
1023 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1024 {
1025         struct hns3_hw *hw = &hns->hw;
1026         struct rte_eth_conf *dev_conf;
1027         bool en;
1028         int ret;
1029
1030         dev_conf = &hw->data->dev_conf;
1031         en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
1032                                                                    : false;
1033         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1034         if (ret)
1035                 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1036                          ret);
1037         return ret;
1038 }
1039
1040 static int
1041 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1042 {
1043         struct hns3_adapter *hns = dev->data->dev_private;
1044         struct rte_eth_dev_data *data = dev->data;
1045         struct hns3_hw *hw = &hns->hw;
1046         int ret;
1047
1048         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1049             data->dev_conf.txmode.hw_vlan_reject_untagged ||
1050             data->dev_conf.txmode.hw_vlan_insert_pvid) {
1051                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1052                               "or hw_vlan_insert_pvid is not support!");
1053         }
1054
1055         /* Apply vlan offload setting */
1056         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1057         if (ret)
1058                 hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);
1059
1060         return ret;
1061 }
1062
1063 static int
1064 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1065 {
1066         uint8_t msg_data;
1067
1068         msg_data = alive ? 1 : 0;
1069         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1070                                  sizeof(msg_data), false, NULL, 0);
1071 }
1072
1073 static void
1074 hns3vf_keep_alive_handler(void *param)
1075 {
1076         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1077         struct hns3_adapter *hns = eth_dev->data->dev_private;
1078         struct hns3_hw *hw = &hns->hw;
1079         uint8_t respmsg;
1080         int ret;
1081
1082         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1083                                 false, &respmsg, sizeof(uint8_t));
1084         if (ret)
1085                 hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
1086                          ret);
1087
1088         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1089                           eth_dev);
1090 }
1091
/* Periodic service task: refresh the link status and re-arm itself. */
static void
hns3vf_service_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	/*
	 * The query link status and reset processing are executed in the
	 * interrupt thread. When the IMP reset occurs, IMP will not respond,
	 * and the query operation will time out after 30ms. In the case of
	 * multiple PF/VFs, each query failure timeout causes the IMP reset
	 * interrupt to fail to respond within 100ms.
	 * Before querying the link status, check whether there is a reset
	 * pending, and if so, abandon the query.
	 */
	if (!hns3vf_is_reset_pending(hns))
		hns3vf_request_link_info(hw);
	else
		hns3_warn(hw, "Cancel the query when reset is pending");

	/* Schedule the next run HNS3VF_SERVICE_INTERVAL microseconds away. */
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
			  eth_dev);
}
1116
/*
 * First-time hardware setup for the VF: promiscuous mode, MTU, default
 * VLAN filter, GRO and the alive notification to the PF. On any failure
 * after the first step, the promiscuous setting is rolled back.
 */
static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t mtu = hw->data->mtu;
	int ret;

	ret = hns3vf_set_promisc_mode(hw, true);
	if (ret)
		return ret;

	ret = hns3vf_config_mtu(hw, mtu);
	if (ret)
		goto err_init_hardware;

	/* Install VLAN id 0 so untagged traffic passes the filter. */
	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
		goto err_init_hardware;
	}

	/* GRO starts disabled here. */
	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_init_hardware;
	}

	ret = hns3vf_set_alive(hw, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
		goto err_init_hardware;
	}

	/* Kick off an initial (asynchronous) link status query. */
	hns3vf_request_link_info(hw);
	return 0;

err_init_hardware:
	(void)hns3vf_set_promisc_mode(hw, false);
	return ret;
}
1157
/* Ask the PF to clear this VF's entry from the vport list table. */
static int
hns3vf_clear_vport_list(struct hns3_hw *hw)
{
	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
				 NULL, 0);
}
1165
/*
 * One-time VF initialization: map BAR2, bring up the firmware command
 * queue, register the misc (vector0) interrupt, fetch the configuration
 * from the PF and initialize the hardware. Failures unwind in reverse
 * order via the goto chain.
 */
static int
hns3vf_init_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	rte_spinlock_init(&hw->mbx_resp.lock);

	/* Clear any stale vector0 events before enabling the interrupt. */
	hns3vf_clear_event_cause(hw, 0);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 hns3vf_interrupt_handler, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	/* Enable interrupt */
	rte_intr_enable(&pci_dev->intr_handle);
	hns3vf_enable_irq0(hw);

	/* Get configuration from PF */
	ret = hns3vf_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	/*
	 * The hns3 PF ethdev driver in kernel support setting VF MAC address
	 * on the host by "ip link set ..." command. To avoid some incorrect
	 * scenes, for example, hns3 VF PMD driver fails to receive and send
	 * packets after user configure the MAC address by using the
	 * "ip link set ..." command, hns3 VF PMD driver keep the same MAC
	 * address strategy as the hns3 kernel ethdev driver in the
	 * initialization. If user configure a MAC address by the ip command
	 * for VF device, then hns3 VF PMD driver will start with it, otherwise
	 * start with a random MAC address in the initialization.
	 */
	ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
	if (ret)
		rte_eth_random_addr(hw->mac.mac_addr);

	ret = hns3vf_clear_vport_list(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
		goto err_get_config;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret)
		goto err_get_config;

	hns3_set_default_rss_args(hw);

	return 0;

err_get_config:
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
	hns3_cmd_uninit(hw);

err_cmd_init:
	hns3_cmd_destroy_queue(hw);

err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
1260
/*
 * Tear down everything hns3vf_init_vf() set up, in reverse order:
 * RSS state, alive/promisc notifications to the PF, the vector0
 * interrupt, and finally the firmware command queue.
 */
static void
hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_rss_uninit(hns);
	/* Best-effort notifications to the PF; errors are ignored. */
	(void)hns3vf_set_alive(hw, false);
	(void)hns3vf_set_promisc_mode(hw, false);
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
1281
1282 static int
1283 hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
1284                              bool mmap, uint16_t queue_id)
1285
1286 {
1287         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1288         struct hns3_vf_bind_vector_msg bind_msg;
1289         uint16_t code;
1290         int ret;
1291
1292         memset(&bind_msg, 0, sizeof(bind_msg));
1293         code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
1294                 HNS3_MBX_UNMAP_RING_TO_VECTOR;
1295         bind_msg.vector_id = vector_id;
1296         bind_msg.ring_num = 1;
1297         bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
1298         bind_msg.param[0].tqp_index = queue_id;
1299         bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
1300
1301         ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
1302                                 sizeof(bind_msg), false, NULL, 0);
1303         if (ret) {
1304                 hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
1305                          queue_id, vector_id, ret);
1306                 return ret;
1307         }
1308
1309         return 0;
1310 }
1311
1312 static int
1313 hns3vf_do_stop(struct hns3_adapter *hns)
1314 {
1315         struct hns3_hw *hw = &hns->hw;
1316         bool reset_queue;
1317
1318         hw->mac.link_status = ETH_LINK_DOWN;
1319
1320         if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
1321                 hns3vf_configure_mac_addr(hns, true);
1322                 reset_queue = true;
1323         } else
1324                 reset_queue = false;
1325         return hns3_stop_queues(hns, reset_queue);
1326 }
1327
/*
 * Undo hns3vf_map_rx_interrupt(): unbind every Rx ring from its MSI-X
 * vector and release the eventfd/vector-table resources. No-op when Rx
 * interrupts were never configured.
 */
static void
hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint8_t base = 0;
	uint8_t vec = 0;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			/* Unbind errors are ignored on the teardown path. */
			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
							   q_id);
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
1361
/*
 * .dev_stop ops: halt the datapath on all processes, stop the hardware
 * queues, release mbufs and cancel the periodic service task. The queue
 * stop is skipped while a reset is in progress because the reset handler
 * owns the hardware then.
 */
static void
hns3vf_dev_stop(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	/* Switch to the dummy rx/tx functions before stopping. */
	hns3_set_rxtx_function(dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		hns3vf_do_stop(hns);
		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
	rte_spinlock_unlock(&hw->lock);

	hns3vf_unmap_rx_interrupt(dev);
}
1389
/*
 * .dev_close ops: stop the port if needed, abort any in-flight reset,
 * remove MAC/VLAN state from hardware and release all driver resources.
 * Only the primary process performs the teardown.
 */
static void
hns3vf_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	if (hw->adapter_state == HNS3_NIC_STARTED)
		hns3vf_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;
	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
	/* Remove (del = true) all mc MAC addresses and VLAN filters. */
	hns3vf_configure_all_mc_mac_addr(hns, true);
	hns3vf_remove_all_vlan_table(hns);
	hns3vf_uninit_vf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_mp_uninit_primary();
	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
1416
1417 static int
1418 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1419                        __rte_unused int wait_to_complete)
1420 {
1421         struct hns3_adapter *hns = eth_dev->data->dev_private;
1422         struct hns3_hw *hw = &hns->hw;
1423         struct hns3_mac *mac = &hw->mac;
1424         struct rte_eth_link new_link;
1425
1426         memset(&new_link, 0, sizeof(new_link));
1427         switch (mac->link_speed) {
1428         case ETH_SPEED_NUM_10M:
1429         case ETH_SPEED_NUM_100M:
1430         case ETH_SPEED_NUM_1G:
1431         case ETH_SPEED_NUM_10G:
1432         case ETH_SPEED_NUM_25G:
1433         case ETH_SPEED_NUM_40G:
1434         case ETH_SPEED_NUM_50G:
1435         case ETH_SPEED_NUM_100G:
1436                 new_link.link_speed = mac->link_speed;
1437                 break;
1438         default:
1439                 new_link.link_speed = ETH_SPEED_NUM_100M;
1440                 break;
1441         }
1442
1443         new_link.link_duplex = mac->link_duplex;
1444         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
1445         new_link.link_autoneg =
1446             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
1447
1448         return rte_eth_linkstatus_set(eth_dev, &new_link);
1449 }
1450
1451 static int
1452 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1453 {
1454         struct hns3_hw *hw = &hns->hw;
1455         int ret;
1456
1457         ret = hns3vf_set_tc_info(hns);
1458         if (ret)
1459                 return ret;
1460
1461         ret = hns3_start_queues(hns, reset_queue);
1462         if (ret) {
1463                 hns3_err(hw, "Failed to start queues: %d", ret);
1464                 return ret;
1465         }
1466
1467         return 0;
1468 }
1469
1470 static int
1471 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
1472 {
1473         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1474         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1475         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1476         uint32_t intr_vector;
1477         uint8_t base = 0;
1478         uint8_t vec = 0;
1479         uint16_t q_id;
1480         int ret;
1481
1482         if (dev->data->dev_conf.intr_conf.rxq == 0)
1483                 return 0;
1484
1485         /* disable uio/vfio intr/eventfd mapping */
1486         rte_intr_disable(intr_handle);
1487
1488         /* check and configure queue intr-vector mapping */
1489         if (rte_intr_cap_multiple(intr_handle) ||
1490             !RTE_ETH_DEV_SRIOV(dev).active) {
1491                 intr_vector = hw->used_rx_queues;
1492                 /* It creates event fd for each intr vector when MSIX is used */
1493                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1494                         return -EINVAL;
1495         }
1496         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1497                 intr_handle->intr_vec =
1498                         rte_zmalloc("intr_vec",
1499                                     hw->used_rx_queues * sizeof(int), 0);
1500                 if (intr_handle->intr_vec == NULL) {
1501                         hns3_err(hw, "Failed to allocate %d rx_queues"
1502                                      " intr_vec", hw->used_rx_queues);
1503                         ret = -ENOMEM;
1504                         goto vf_alloc_intr_vec_error;
1505                 }
1506         }
1507
1508         if (rte_intr_allow_others(intr_handle)) {
1509                 vec = RTE_INTR_VEC_RXTX_OFFSET;
1510                 base = RTE_INTR_VEC_RXTX_OFFSET;
1511         }
1512         if (rte_intr_dp_is_en(intr_handle)) {
1513                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1514                         ret = hns3vf_bind_ring_with_vector(dev, vec, true,
1515                                                            q_id);
1516                         if (ret)
1517                                 goto vf_bind_vector_error;
1518                         intr_handle->intr_vec[q_id] = vec;
1519                         if (vec < base + intr_handle->nb_efd - 1)
1520                                 vec++;
1521                 }
1522         }
1523         rte_intr_enable(intr_handle);
1524         return 0;
1525
1526 vf_bind_vector_error:
1527         rte_intr_efd_disable(intr_handle);
1528         if (intr_handle->intr_vec) {
1529                 free(intr_handle->intr_vec);
1530                 intr_handle->intr_vec = NULL;
1531         }
1532         return ret;
1533 vf_alloc_intr_vec_error:
1534         rte_intr_efd_disable(intr_handle);
1535         return ret;
1536 }
1537
/*
 * .dev_start ops: start the hardware queues, map Rx interrupts, enable
 * the datapath on all processes and arm the periodic service task.
 * Refused with -EBUSY while a reset is in progress.
 */
static int
hns3vf_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_atomic16_read(&hw->reset.resetting))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;
	ret = hns3vf_do_start(hns, true);
	if (ret) {
		hw->adapter_state = HNS3_NIC_CONFIGURED;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}
	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	ret = hns3vf_map_rx_interrupt(dev);
	if (ret)
		return ret;
	/* Install the real rx/tx burst functions and fan out to peers. */
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);

	return ret;
}
1569
1570 static bool
1571 is_vf_reset_done(struct hns3_hw *hw)
1572 {
1573 #define HNS3_FUN_RST_ING_BITS \
1574         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1575          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1576          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1577          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1578
1579         uint32_t val;
1580
1581         if (hw->reset.level == HNS3_VF_RESET) {
1582                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1583                 if (val & HNS3_VF_RST_ING_BIT)
1584                         return false;
1585         } else {
1586                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1587                 if (val & HNS3_FUN_RST_ING_BITS)
1588                         return false;
1589         }
1590         return true;
1591 }
1592
1593 bool
1594 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1595 {
1596         struct hns3_hw *hw = &hns->hw;
1597         enum hns3_reset_level reset;
1598
1599         hns3vf_check_event_cause(hns, NULL);
1600         reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1601         if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
1602                 hns3_warn(hw, "High level reset %d is pending", reset);
1603                 return true;
1604         }
1605         return false;
1606 }
1607
/*
 * Reset step 4: poll (via the alarm-driven wait_data state machine) until
 * the hardware reports reset completion. Returns 0 when ready, -EAGAIN
 * while a check is still scheduled, -ETIME when the hardware never became
 * ready within the allotted attempts.
 */
static int
hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS) {
		/*
		 * After vf reset is ready, the PF may not have completed
		 * the reset processing. The vf sending mbox to PF may fail
		 * during the pf reset, so it is better to add extra delay.
		 */
		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
		    hw->reset.level == HNS3_FLR_RESET)
			return 0;
		/* Reset retry process, no need to add extra delay. */
		if (hw->reset.attempts)
			return 0;
		if (wait_data->check_completion == NULL)
			return 0;

		/* Schedule a one-off 1 s delay before declaring success. */
		wait_data->check_completion = NULL;
		wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
		wait_data->count = 1;
		wait_data->result = HNS3_WAIT_REQUEST;
		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
				  wait_data);
		hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
		return -EAGAIN;
	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		gettimeofday(&tv, NULL);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	/* First call for this reset: arm the periodic completion check. */
	wait_data->hns = hns;
	wait_data->check_completion = is_vf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
				      HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3VF_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}
1656
1657 static int
1658 hns3vf_prepare_reset(struct hns3_adapter *hns)
1659 {
1660         struct hns3_hw *hw = &hns->hw;
1661         int ret = 0;
1662
1663         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1664                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1665                                         0, true, NULL, 0);
1666         }
1667         rte_atomic16_set(&hw->reset.disable_cmd, 1);
1668
1669         return ret;
1670 }
1671
/*
 * Quiesce the port before a reset: cancel the service task, stop the
 * datapath on all processes, stop the queues (deferring mbuf release to
 * the reset handler) and remove multicast MAC entries when the command
 * interface is still usable.
 */
static int
hns3vf_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	if (hw->adapter_state == HNS3_NIC_STARTED)
		rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hw->mac.link_status = ETH_LINK_DOWN;

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	/* Wait for in-flight rx/tx bursts to drain before stopping queues. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (hw->adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3vf_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries.
	 */
	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
		hns3vf_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
1708
1709 static int
1710 hns3vf_start_service(struct hns3_adapter *hns)
1711 {
1712         struct hns3_hw *hw = &hns->hw;
1713         struct rte_eth_dev *eth_dev;
1714
1715         eth_dev = &rte_eth_devices[hw->data->port_id];
1716         hns3_set_rxtx_function(eth_dev);
1717         hns3_mp_req_start_rxtx(eth_dev);
1718         if (hw->adapter_state == HNS3_NIC_STARTED)
1719                 hns3vf_service_handler(eth_dev);
1720
1721         return 0;
1722 }
1723
1724 static int
1725 hns3vf_check_default_mac_change(struct hns3_hw *hw)
1726 {
1727         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1728         struct rte_ether_addr *hw_mac;
1729         int ret;
1730
1731         /*
1732          * The hns3 PF ethdev driver in kernel support setting VF MAC address
1733          * on the host by "ip link set ..." command. If the hns3 PF kernel
1734          * ethdev driver sets the MAC address for VF device after the
1735          * initialization of the related VF device, the PF driver will notify
1736          * VF driver to reset VF device to make the new MAC address effective
1737          * immediately. The hns3 VF PMD driver should check whether the MAC
1738          * address has been changed by the PF kernel ethdev driver, if changed
1739          * VF driver should configure hardware using the new MAC address in the
1740          * recovering hardware configuration stage of the reset process.
1741          */
1742         ret = hns3vf_get_host_mac_addr(hw);
1743         if (ret)
1744                 return ret;
1745
1746         hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
1747         ret = rte_is_zero_ether_addr(hw_mac);
1748         if (ret) {
1749                 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
1750         } else {
1751                 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
1752                 if (!ret) {
1753                         rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
1754                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1755                                               &hw->data->mac_addrs[0]);
1756                         hns3_warn(hw, "Default MAC address has been changed to:"
1757                                   " %s by the host PF kernel ethdev driver",
1758                                   mac_str);
1759                 }
1760         }
1761
1762         return 0;
1763 }
1764
/*
 * Restore user configuration after a reset: default/unicast MAC addresses,
 * the multicast table, VLAN configuration and — if the port was running —
 * the started state.  On failure the already-restored MAC tables are torn
 * down again in reverse order.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int
hns3vf_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* Pick up a default MAC possibly changed by the host PF driver. */
	ret = hns3vf_check_default_mac_change(hw);
	if (ret)
		return ret;

	ret = hns3vf_configure_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3vf_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3vf_restore_vlan_conf(hns);
	if (ret)
		goto err_vlan_table;

	if (hw->adapter_state == HNS3_NIC_STARTED) {
		ret = hns3vf_do_start(hns, false);
		if (ret)
			goto err_vlan_table;
		hns3_info(hw, "hns3vf dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

	/* Rollback: 'true' removes the entries configured above. */
err_vlan_table:
	hns3vf_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3vf_configure_mac_addr(hns, true);
	return ret;
}
1802
1803 static enum hns3_reset_level
1804 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
1805 {
1806         enum hns3_reset_level reset_level;
1807
1808         /* return the highest priority reset level amongst all */
1809         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
1810                 reset_level = HNS3_VF_RESET;
1811         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
1812                 reset_level = HNS3_VF_FULL_RESET;
1813         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
1814                 reset_level = HNS3_VF_PF_FUNC_RESET;
1815         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
1816                 reset_level = HNS3_VF_FUNC_RESET;
1817         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
1818                 reset_level = HNS3_FLR_RESET;
1819         else
1820                 reset_level = HNS3_NONE_RESET;
1821
1822         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
1823                 return HNS3_NONE_RESET;
1824
1825         return reset_level;
1826 }
1827
/*
 * Delayed task driving the VF reset state machine.
 *
 * First recovers from a possibly lost reset interrupt (a deferred schedule
 * means the interrupt never fired in time), then runs the full reset
 * process for the highest priority pending level and reports resets that
 * took longer than HNS3_RESET_PROCESS_MS.
 */
static void
hns3vf_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;

	/*
	 * The interrupt is not triggered within the delay time.
	 * The interrupt may have been lost. It is necessary to handle
	 * the interrupt to recover from the error.
	 */
	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			/* Fall back to the most severe VF reset level. */
			hns3_err(hw, "No reset level is set, try global reset");
			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
		}
	}
	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);

	/*
	 * Hardware reset has been notified, we now have to poll & check if
	 * hardware has actually completed the reset sequence.
	 */
	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		/* Time the reset so excessive durations can be reported. */
		gettimeofday(&tv_start, NULL);
		hns3_reset_process(hns, reset_level);
		gettimeofday(&tv, NULL);
		timersub(&tv, &tv_start, &tv_delta);
		msec = tv_delta.tv_sec * MSEC_PER_SEC +
		       tv_delta.tv_usec / USEC_PER_MSEC;
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIx64
				 " ms time=%ld.%.6ld",
				 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
	}
}
1874
1875 static int
1876 hns3vf_reinit_dev(struct hns3_adapter *hns)
1877 {
1878         struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
1879         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1880         struct hns3_hw *hw = &hns->hw;
1881         int ret;
1882
1883         if (hw->reset.level == HNS3_VF_FULL_RESET) {
1884                 rte_intr_disable(&pci_dev->intr_handle);
1885                 hns3vf_set_bus_master(pci_dev, true);
1886         }
1887
1888         /* Firmware command initialize */
1889         ret = hns3_cmd_init(hw);
1890         if (ret) {
1891                 hns3_err(hw, "Failed to init cmd: %d", ret);
1892                 goto err_cmd_init;
1893         }
1894
1895         if (hw->reset.level == HNS3_VF_FULL_RESET) {
1896                 /*
1897                  * UIO enables msix by writing the pcie configuration space
1898                  * vfio_pci enables msix in rte_intr_enable.
1899                  */
1900                 if (pci_dev->kdrv == RTE_KDRV_IGB_UIO ||
1901                     pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) {
1902                         if (hns3vf_enable_msix(pci_dev, true))
1903                                 hns3_err(hw, "Failed to enable msix");
1904                 }
1905
1906                 rte_intr_enable(&pci_dev->intr_handle);
1907         }
1908
1909         ret = hns3_reset_all_queues(hns);
1910         if (ret) {
1911                 hns3_err(hw, "Failed to reset all queues: %d", ret);
1912                 goto err_init;
1913         }
1914
1915         ret = hns3vf_init_hardware(hns);
1916         if (ret) {
1917                 hns3_err(hw, "Failed to init hardware: %d", ret);
1918                 goto err_init;
1919         }
1920
1921         return 0;
1922
1923 err_cmd_init:
1924         hns3vf_set_bus_master(pci_dev, false);
1925 err_init:
1926         hns3_cmd_uninit(hw);
1927         return ret;
1928 }
1929
/*
 * Control-path ethdev ops for the hns3 VF driver.  The rx/tx burst
 * functions are installed separately via hns3_set_rxtx_function().
 */
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_start          = hns3vf_dev_start,
	.dev_stop           = hns3vf_dev_stop,
	.dev_close          = hns3vf_dev_close,
	.mtu_set            = hns3vf_dev_mtu_set,
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get      = hns3vf_dev_infos_get,
	.rx_queue_setup     = hns3_rx_queue_setup,
	.tx_queue_setup     = hns3_tx_queue_setup,
	.rx_queue_release   = hns3_dev_rx_queue_release,
	.tx_queue_release   = hns3_dev_tx_queue_release,
	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
	.dev_configure      = hns3vf_dev_configure,
	.mac_addr_add       = hns3vf_add_mac_addr,
	.mac_addr_remove    = hns3vf_remove_mac_addr,
	.mac_addr_set       = hns3vf_set_default_mac_addr,
	.set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
	.link_update        = hns3vf_dev_link_update,
	.rss_hash_update    = hns3_dev_rss_hash_update,
	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
	.reta_update        = hns3_dev_rss_reta_update,
	.reta_query         = hns3_dev_rss_reta_query,
	.filter_ctrl        = hns3_dev_filter_ctrl,
	.vlan_filter_set    = hns3vf_vlan_filter_set,
	.vlan_offload_set   = hns3vf_vlan_offload_set,
	.get_reg            = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
1965
/*
 * Callbacks invoked by the common hns3 reset framework at each stage of
 * the VF reset process (see hns3_reset_process()).
 */
static const struct hns3_reset_ops hns3vf_reset_ops = {
	.reset_service       = hns3vf_reset_service,
	.stop_service        = hns3vf_stop_service,
	.prepare_reset       = hns3vf_prepare_reset,
	.wait_hardware_ready = hns3vf_wait_hardware_ready,
	.reinit_dev          = hns3vf_reinit_dev,
	.restore_conf        = hns3vf_restore_conf,
	.start_service       = hns3vf_start_service,
};
1975
/*
 * ethdev init callback for the hns3 VF PMD.
 *
 * Sets up per-process private data and the burst functions; in the
 * primary process it additionally initializes the reset framework, the
 * VF hardware and the MAC address table, then arms the keep-alive alarm.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Per-process storage for the flow filter lists below. */
	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Secondary processes only attach to the shared state. */
		hns3_mp_init_secondary();
		hw->secondary_cnt++;
		return 0;
	}

	hns3_mp_init_primary();

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;

	/* The reset framework must exist before VF init can schedule it. */
	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3vf_reset_ops;

	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
		/* A reset arrived during init; run it now that init is done. */
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}
	/* Periodic keep-alive message to the PF. */
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;

	return ret;
}
2071
2072 static int
2073 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2074 {
2075         struct hns3_adapter *hns = eth_dev->data->dev_private;
2076         struct hns3_hw *hw = &hns->hw;
2077
2078         PMD_INIT_FUNC_TRACE();
2079
2080         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2081                 return -EPERM;
2082
2083         eth_dev->dev_ops = NULL;
2084         eth_dev->rx_pkt_burst = NULL;
2085         eth_dev->tx_pkt_burst = NULL;
2086         eth_dev->tx_pkt_prepare = NULL;
2087
2088         if (hw->adapter_state < HNS3_NIC_CLOSING)
2089                 hns3vf_dev_close(eth_dev);
2090
2091         hw->adapter_state = HNS3_NIC_REMOVED;
2092         return 0;
2093 }
2094
2095 static int
2096 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2097                      struct rte_pci_device *pci_dev)
2098 {
2099         return rte_eth_dev_pci_generic_probe(pci_dev,
2100                                              sizeof(struct hns3_adapter),
2101                                              hns3vf_dev_init);
2102 }
2103
2104 static int
2105 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2106 {
2107         return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2108 }
2109
/* PCI device IDs (Huawei HNS3 VF variants) claimed by this driver. */
static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
2115
/* PCI driver descriptor for the hns3 VF PMD. */
static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};
2122
/*
 * Register the PMD with EAL, export its PCI ID table and declare the
 * kernel modules (igb_uio or vfio-pci) it can bind against.
 */
RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");