net/hns3: fix Tx interrupt when enabling Rx interrupt
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <arpa/inet.h>
12 #include <linux/pci_regs.h>
13
14 #include <rte_alarm.h>
15 #include <rte_atomic.h>
16 #include <rte_bus_pci.h>
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_eal.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_ethdev_pci.h>
25 #include <rte_interrupts.h>
26 #include <rte_io.h>
27 #include <rte_log.h>
28 #include <rte_pci.h>
29 #include <rte_vfio.h>
30
31 #include "hns3_ethdev.h"
32 #include "hns3_logs.h"
33 #include "hns3_rxtx.h"
34 #include "hns3_regs.h"
35 #include "hns3_intr.h"
36 #include "hns3_dcb.h"
37 #include "hns3_mp.h"
38
39 #define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
40 #define HNS3VF_SERVICE_INTERVAL         1000000 /* us */
41
42 #define HNS3VF_RESET_WAIT_MS    20
43 #define HNS3VF_RESET_WAIT_CNT   2000
44
45 /* Reset related Registers */
46 #define HNS3_GLOBAL_RESET_BIT           0
47 #define HNS3_CORE_RESET_BIT             1
48 #define HNS3_IMP_RESET_BIT              2
49 #define HNS3_FUN_RST_ING_B              0
50
/* Classification of the sources behind a vector0 (misc) interrupt on the VF. */
enum hns3vf_evt_cause {
	HNS3VF_VECTOR0_EVENT_RST,   /* reset event signalled via CMDQ status */
	HNS3VF_VECTOR0_EVENT_MBX,   /* mailbox (CMDQ RX) message pending */
	HNS3VF_VECTOR0_EVENT_OTHER, /* no recognized event */
};
56
57 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
58                                                     uint64_t *levels);
59 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
60 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
61
62 /* set PCI bus mastering */
63 static void
64 hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
65 {
66         uint16_t reg;
67
68         rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
69
70         if (op)
71                 /* set the master bit */
72                 reg |= PCI_COMMAND_MASTER;
73         else
74                 reg &= ~(PCI_COMMAND_MASTER);
75
76         rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
77 }
78
79 /**
80  * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
81  * @cap: the capability
82  *
83  * Return the address of the given capability within the PCI capability list.
84  */
85 static int
86 hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
87 {
88 #define MAX_PCIE_CAPABILITY 48
89         uint16_t status;
90         uint8_t pos;
91         uint8_t id;
92         int ttl;
93
94         rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
95         if (!(status & PCI_STATUS_CAP_LIST))
96                 return 0;
97
98         ttl = MAX_PCIE_CAPABILITY;
99         rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
100         while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
101                 rte_pci_read_config(device, &id, sizeof(id),
102                                     (pos + PCI_CAP_LIST_ID));
103
104                 if (id == 0xFF)
105                         break;
106
107                 if (id == cap)
108                         return (int)pos;
109
110                 rte_pci_read_config(device, &pos, sizeof(pos),
111                                     (pos + PCI_CAP_LIST_NEXT));
112         }
113         return 0;
114 }
115
116 static int
117 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
118 {
119         uint16_t control;
120         int pos;
121
122         pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
123         if (pos) {
124                 rte_pci_read_config(device, &control, sizeof(control),
125                                     (pos + PCI_MSIX_FLAGS));
126                 if (op)
127                         control |= PCI_MSIX_FLAGS_ENABLE;
128                 else
129                         control &= ~PCI_MSIX_FLAGS_ENABLE;
130                 rte_pci_write_config(device, &control, sizeof(control),
131                                      (pos + PCI_MSIX_FLAGS));
132                 return 0;
133         }
134         return -ENXIO;
135 }
136
137 static int
138 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
139                     __attribute__ ((unused)) uint32_t idx,
140                     __attribute__ ((unused)) uint32_t pool)
141 {
142         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
143         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
144         int ret;
145
146         rte_spinlock_lock(&hw->lock);
147         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
148                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
149                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
150         rte_spinlock_unlock(&hw->lock);
151         if (ret) {
152                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
153                                       mac_addr);
154                 hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
155                          ret);
156         }
157
158         return ret;
159 }
160
161 static void
162 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
163 {
164         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
165         /* index will be checked by upper level rte interface */
166         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
167         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
168         int ret;
169
170         rte_spinlock_lock(&hw->lock);
171         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
172                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
173                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
174                                 NULL, 0);
175         rte_spinlock_unlock(&hw->lock);
176         if (ret) {
177                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
178                                       mac_addr);
179                 hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
180                          mac_str, ret);
181         }
182 }
183
184 static int
185 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
186                             struct rte_ether_addr *mac_addr)
187 {
188 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
189         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
190         struct rte_ether_addr *old_addr;
191         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
192         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193         int ret;
194
195         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
196                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
197                                       mac_addr);
198                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
199                          mac_str);
200                 return -EINVAL;
201         }
202
203         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
204         rte_spinlock_lock(&hw->lock);
205         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
206         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
207                RTE_ETHER_ADDR_LEN);
208
209         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
210                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
211                                 HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0);
212         if (ret) {
213                 /*
214                  * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev
215                  * driver. When user has configured a MAC address for VF device
216                  * by "ip link set ..." command based on the PF device, the hns3
217                  * PF kernel ethdev driver does not allow VF driver to request
218                  * reconfiguring a different default MAC address, and return
219                  * -EPREM to VF driver through mailbox.
220                  */
221                 if (ret == -EPERM) {
222                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
223                                               old_addr);
224                         hns3_warn(hw, "Has permanet mac addr(%s) for vf",
225                                   mac_str);
226                 } else {
227                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
228                                               mac_addr);
229                         hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
230                                  mac_str, ret);
231                 }
232         }
233
234         rte_ether_addr_copy(mac_addr,
235                             (struct rte_ether_addr *)hw->mac.mac_addr);
236         rte_spinlock_unlock(&hw->lock);
237
238         return ret;
239 }
240
241 static int
242 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
243 {
244         struct hns3_hw *hw = &hns->hw;
245         struct rte_ether_addr *addr;
246         enum hns3_mbx_mac_vlan_subcode opcode;
247         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
248         int ret = 0;
249         int i;
250
251         if (del)
252                 opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
253         else
254                 opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
255         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
256                 addr = &hw->data->mac_addrs[i];
257                 if (!rte_is_valid_assigned_ether_addr(addr))
258                         continue;
259                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
260                 hns3_dbg(hw, "rm mac addr: %s", mac_str);
261                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
262                                         addr->addr_bytes, RTE_ETHER_ADDR_LEN,
263                                         false, NULL, 0);
264                 if (ret) {
265                         hns3_err(hw, "Failed to remove mac addr for vf: %d",
266                                  ret);
267                         break;
268                 }
269         }
270         return ret;
271 }
272
273 static int
274 hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
275                        struct rte_ether_addr *mac_addr)
276 {
277         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
278         struct hns3_hw *hw = &hns->hw;
279         int ret;
280
281         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
282                                 HNS3_MBX_MAC_VLAN_MC_ADD,
283                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
284                                 NULL, 0);
285         if (ret) {
286                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
287                                       mac_addr);
288                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
289                          mac_str, ret);
290                 return ret;
291         }
292
293         return 0;
294 }
295
296 static int
297 hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
298                           struct rte_ether_addr *mac_addr)
299 {
300         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
301         struct hns3_hw *hw = &hns->hw;
302         int ret;
303
304         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
305                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
306                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
307                                 NULL, 0);
308         if (ret) {
309                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
310                                       mac_addr);
311                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
312                          mac_str, ret);
313                 return ret;
314         }
315
316         return 0;
317 }
318
319 static int
320 hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
321                             struct rte_ether_addr *mc_addr_set,
322                             uint32_t nb_mc_addr)
323 {
324         struct hns3_adapter *hns = dev->data->dev_private;
325         struct hns3_hw *hw = &hns->hw;
326         struct rte_ether_addr *addr;
327         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
328         int cur_addr_num;
329         int set_addr_num;
330         int num;
331         int ret;
332         int i;
333
334         if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
335                 hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
336                          "invalid. valid range: 0~%d",
337                          nb_mc_addr, HNS3_MC_MACADDR_NUM);
338                 return -EINVAL;
339         }
340
341         set_addr_num = (int)nb_mc_addr;
342         for (i = 0; i < set_addr_num; i++) {
343                 addr = &mc_addr_set[i];
344                 if (!rte_is_multicast_ether_addr(addr)) {
345                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
346                                               addr);
347                         hns3_err(hw,
348                                  "Failed to set mc mac addr, addr(%s) invalid.",
349                                  mac_str);
350                         return -EINVAL;
351                 }
352         }
353         rte_spinlock_lock(&hw->lock);
354         cur_addr_num = hw->mc_addrs_num;
355         for (i = 0; i < cur_addr_num; i++) {
356                 num = cur_addr_num - i - 1;
357                 addr = &hw->mc_addrs[num];
358                 ret = hns3vf_remove_mc_mac_addr(hns, addr);
359                 if (ret) {
360                         rte_spinlock_unlock(&hw->lock);
361                         return ret;
362                 }
363
364                 hw->mc_addrs_num--;
365         }
366
367         for (i = 0; i < set_addr_num; i++) {
368                 addr = &mc_addr_set[i];
369                 ret = hns3vf_add_mc_mac_addr(hns, addr);
370                 if (ret) {
371                         rte_spinlock_unlock(&hw->lock);
372                         return ret;
373                 }
374
375                 rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
376                 hw->mc_addrs_num++;
377         }
378         rte_spinlock_unlock(&hw->lock);
379
380         return 0;
381 }
382
383 static int
384 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
385 {
386         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
387         struct hns3_hw *hw = &hns->hw;
388         struct rte_ether_addr *addr;
389         int err = 0;
390         int ret;
391         int i;
392
393         for (i = 0; i < hw->mc_addrs_num; i++) {
394                 addr = &hw->mc_addrs[i];
395                 if (!rte_is_multicast_ether_addr(addr))
396                         continue;
397                 if (del)
398                         ret = hns3vf_remove_mc_mac_addr(hns, addr);
399                 else
400                         ret = hns3vf_add_mc_mac_addr(hns, addr);
401                 if (ret) {
402                         err = ret;
403                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
404                                               addr);
405                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
406                                  del ? "Remove" : "Restore", mac_str, ret);
407                 }
408         }
409         return err;
410 }
411
412 static int
413 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
414 {
415         struct hns3_mbx_vf_to_pf_cmd *req;
416         struct hns3_cmd_desc desc;
417         int ret;
418
419         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
420
421         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
422         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
423         req->msg[1] = en_bc_pmc ? 1 : 0;
424
425         ret = hns3_cmd_send(hw, &desc, 1);
426         if (ret)
427                 hns3_err(hw, "Set promisc mode fail, status is %d", ret);
428
429         return ret;
430 }
431
432 static int
433 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
434                              bool mmap, enum hns3_ring_type queue_type,
435                              uint16_t queue_id)
436 {
437         struct hns3_vf_bind_vector_msg bind_msg;
438         const char *op_str;
439         uint16_t code;
440         int ret;
441
442         memset(&bind_msg, 0, sizeof(bind_msg));
443         code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
444                 HNS3_MBX_UNMAP_RING_TO_VECTOR;
445         bind_msg.vector_id = vector_id;
446
447         if (queue_type == HNS3_RING_TYPE_RX)
448                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
449         else
450                 bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
451
452         bind_msg.param[0].ring_type = queue_type;
453         bind_msg.ring_num = 1;
454         bind_msg.param[0].tqp_index = queue_id;
455         op_str = mmap ? "Map" : "Unmap";
456         ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
457                                 sizeof(bind_msg), false, NULL, 0);
458         if (ret) {
459                 hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
460                          op_str, queue_id, bind_msg.vector_id, ret);
461                 return ret;
462         }
463
464         return 0;
465 }
466
/*
 * Clear all residual queue/vector mappings left in hardware and program the
 * default interrupt coalescing parameters for every usable queue vector.
 * Returns 0 on success or the first mailbox error code.
 */
static int
hns3vf_init_ring_with_vector(struct hns3_hw *hw)
{
	uint8_t vec;
	int ret;
	int i;

	/*
	 * In hns3 network engine, vector 0 is always the misc interrupt of this
	 * function, vector 1~N can be used respectively for the queues of the
	 * function. Tx and Rx queues with the same number share the interrupt
	 * vector. In the initialization clearing the all hardware mapping
	 * relationship configurations between queues and interrupt vectors is
	 * needed, so some error caused by the residual configurations, such as
	 * the unexpected Tx interrupt, can be avoid. Because of the hardware
	 * constraints in hns3 hardware engine, we have to implement clearing
	 * the mapping relationship configurations by binding all queues to the
	 * last interrupt vector and reserving the last interrupt vector. This
	 * method results in a decrease of the maximum queues when upper
	 * applications call the rte_eth_dev_configure API function to enable
	 * Rx interrupt.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter and rate limiter configuration of queue's
		 * interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);

		/* "Unbind" = bind to the reserved last vector (see above). */
		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
						   HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
					  "vector: %d, ret=%d", i, vec, ret);
			return ret;
		}

		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
						   HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
					  "vector: %d, ret=%d", i, vec, ret);
			return ret;
		}
	}

	return 0;
}
521
/*
 * .dev_configure ops: apply the upper application's port configuration.
 *
 * Sets up fake Tx/Rx queues (to satisfy the hardware's equal-queue-count
 * requirement), rejects fixed link speed, programs RSS when requested,
 * refreshes MTU for jumbo frames, and configures VLAN. On any failure the
 * fake queues are torn down and the adapter state reverts to INITIALIZED.
 */
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	uint16_t mtu;
	int ret;

	/*
	 * Hardware does not support individually enable/disable/reset the Tx or
	 * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
	 * and Rx queues at the same time. When the numbers of Tx queues
	 * allocated by upper applications are not equal to the numbers of Rx
	 * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
	 * of Tx/Rx queues. otherwise, network engine can not work as usual. But
	 * these fake queues are imperceptible, and can not be used by upper
	 * applications.
	 */
	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
	if (ret) {
		hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
		return ret;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;
	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		/* VF link speed/duplex follows the PF; fixed speed rejected. */
		hns3_err(hw, "setting link speed/duplex not supported");
		ret = -EINVAL;
		goto cfg_err;
	}

	/* When RSS is not configured, redirect the packet queue 0 */
	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
		rss_conf = conf->rx_adv_conf.rss_conf;
		if (rss_conf.rss_key == NULL) {
			/* Fall back to the driver's stored default RSS key. */
			rss_conf.rss_key = rss_cfg->key;
			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
		}

		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	/*
	 * If jumbo frames are enabled, MTU needs to be refreshed
	 * according to the maximum RX packet length.
	 */
	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
		 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
		 * can safely assign to "uint16_t" type variable.
		 */
		mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
		ret = hns3vf_dev_mtu_set(dev, mtu);
		if (ret)
			goto cfg_err;
		dev->data->mtu = mtu;
	}

	ret = hns3vf_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

cfg_err:
	/* Roll back: drop fake queues and leave the CONFIGURING state. */
	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
	hw->adapter_state = HNS3_NIC_INITIALIZED;

	return ret;
}
601
602 static int
603 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
604 {
605         int ret;
606
607         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
608                                 sizeof(mtu), true, NULL, 0);
609         if (ret)
610                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
611
612         return ret;
613 }
614
615 static int
616 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
617 {
618         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
619         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
620         int ret;
621
622         if (dev->data->dev_started) {
623                 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
624                          "before configuration", dev->data->port_id);
625                 return -EBUSY;
626         }
627
628         if (rte_atomic16_read(&hw->reset.resetting)) {
629                 hns3_err(hw, "Failed to set mtu during resetting");
630                 return -EIO;
631         }
632
633         rte_spinlock_lock(&hw->lock);
634         ret = hns3vf_config_mtu(hw, mtu);
635         if (ret) {
636                 rte_spinlock_unlock(&hw->lock);
637                 return ret;
638         }
639         if (frame_size > RTE_ETHER_MAX_LEN)
640                 dev->data->dev_conf.rxmode.offloads |=
641                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
642         else
643                 dev->data->dev_conf.rxmode.offloads &=
644                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
645         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
646         rte_spinlock_unlock(&hw->lock);
647
648         return 0;
649 }
650
651 static int
652 hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
653 {
654         struct hns3_adapter *hns = eth_dev->data->dev_private;
655         struct hns3_hw *hw = &hns->hw;
656         uint16_t q_num = hw->tqps_num;
657
658         /*
659          * In interrupt mode, 'max_rx_queues' is set based on the number of
660          * MSI-X interrupt resources of the hardware.
661          */
662         if (hw->data->dev_conf.intr_conf.rxq == 1)
663                 q_num = hw->intr_tqps_num;
664
665         info->max_rx_queues = q_num;
666         info->max_tx_queues = hw->tqps_num;
667         info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
668         info->min_rx_bufsize = hw->rx_buf_len;
669         info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
670         info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
671
672         info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
673                                  DEV_RX_OFFLOAD_UDP_CKSUM |
674                                  DEV_RX_OFFLOAD_TCP_CKSUM |
675                                  DEV_RX_OFFLOAD_SCTP_CKSUM |
676                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
677                                  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
678                                  DEV_RX_OFFLOAD_KEEP_CRC |
679                                  DEV_RX_OFFLOAD_SCATTER |
680                                  DEV_RX_OFFLOAD_VLAN_STRIP |
681                                  DEV_RX_OFFLOAD_QINQ_STRIP |
682                                  DEV_RX_OFFLOAD_VLAN_FILTER |
683                                  DEV_RX_OFFLOAD_JUMBO_FRAME);
684         info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
685         info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
686                                  DEV_TX_OFFLOAD_IPV4_CKSUM |
687                                  DEV_TX_OFFLOAD_TCP_CKSUM |
688                                  DEV_TX_OFFLOAD_UDP_CKSUM |
689                                  DEV_TX_OFFLOAD_SCTP_CKSUM |
690                                  DEV_TX_OFFLOAD_VLAN_INSERT |
691                                  DEV_TX_OFFLOAD_QINQ_INSERT |
692                                  DEV_TX_OFFLOAD_MULTI_SEGS |
693                                  DEV_TX_OFFLOAD_TCP_TSO |
694                                  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
695                                  DEV_TX_OFFLOAD_GRE_TNL_TSO |
696                                  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
697                                  info->tx_queue_offload_capa);
698
699         info->rx_desc_lim = (struct rte_eth_desc_lim) {
700                 .nb_max = HNS3_MAX_RING_DESC,
701                 .nb_min = HNS3_MIN_RING_DESC,
702                 .nb_align = HNS3_ALIGN_RING_DESC,
703         };
704
705         info->tx_desc_lim = (struct rte_eth_desc_lim) {
706                 .nb_max = HNS3_MAX_RING_DESC,
707                 .nb_min = HNS3_MIN_RING_DESC,
708                 .nb_align = HNS3_ALIGN_RING_DESC,
709         };
710
711         info->vmdq_queue_num = 0;
712
713         info->reta_size = HNS3_RSS_IND_TBL_SIZE;
714         info->hash_key_size = HNS3_RSS_KEY_SIZE;
715         info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
716         info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
717         info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
718
719         return 0;
720 }
721
/* Acknowledge vector0 event sources by writing regclr to the CMDQ source reg. */
static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}
727
/* Mask the misc (vector0) interrupt by writing 0 to its enable register. */
static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
733
/* Unmask the misc (vector0) interrupt by writing 1 to its enable register. */
static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
739
/*
 * Inspect the vector0 CMDQ status register and classify the pending event.
 *
 * On a reset event the function records the pending reset, disables further
 * command sending, and acks the hardware reset-ing flag. When @clearval is
 * non-NULL it receives the bits the caller should write back to clear the
 * handled event source(s).
 */
static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3vf_evt_cause ret;
	uint32_t cmdq_stat_reg;
	uint32_t rst_ing_reg;
	uint32_t val;

	/* Fetch the events from their corresponding regs */
	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
		/* Stop sending commands while the function is resetting. */
		rte_atomic16_set(&hw->reset.disable_cmd, 1);
		/* Acknowledge the reset to hardware via the VF RST ING bit. */
		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
		if (clearval) {
			hw->reset.stats.global_cnt++;
			hns3_warn(hw, "Global reset detected, clear reset status");
		} else {
			/* Caller cannot clear now; defer via delayed reset. */
			hns3_schedule_delayed_reset(hns);
			hns3_warn(hw, "Global reset detected, don't clear reset status");
		}

		ret = HNS3VF_VECTOR0_EVENT_RST;
		goto out;
	}

	/* Check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		ret = HNS3VF_VECTOR0_EVENT_MBX;
		goto out;
	}

	/* Nothing recognized: clear value is 0 (no sources acknowledged). */
	val = 0;
	ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
	if (clearval)
		*clearval = val;
	return ret;
}
786
/*
 * Misc (vector0) interrupt handler for the VF.
 *
 * Disables the interrupt, classifies the pending event (reset or mailbox),
 * dispatches it, then acks the event source and re-enables the interrupt.
 * @param points to the rte_eth_dev registered with the interrupt callback.
 */
static void
hns3vf_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3vf_evt_cause event_cause;
	uint32_t clearval;

	/* Record the interrupt thread id on first entry (0 means unset). */
	if (hw->irq_thread_id == 0)
		hw->irq_thread_id = pthread_self();

	/* Disable interrupt */
	hns3vf_disable_irq0(hw);

	/* Read out interrupt causes */
	event_cause = hns3vf_check_event_cause(hns, &clearval);

	switch (event_cause) {
	case HNS3VF_VECTOR0_EVENT_RST:
		hns3_schedule_reset(hns);
		break;
	case HNS3VF_VECTOR0_EVENT_MBX:
		hns3_dev_handle_mbx_msg(hw);
		break;
	default:
		break;
	}

	/* Clear interrupt causes */
	hns3vf_clear_event_cause(hw, clearval);

	/* Enable interrupt */
	hns3vf_enable_irq0(hw);
}
822
823 static int
824 hns3vf_check_tqp_info(struct hns3_hw *hw)
825 {
826         uint16_t tqps_num;
827
828         tqps_num = hw->tqps_num;
829         if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
830                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
831                                   "range: 1~%d",
832                              tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
833                 return -EINVAL;
834         }
835
836         if (hw->rx_buf_len == 0)
837                 hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
838         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
839
840         return 0;
841 }
842
843 static int
844 hns3vf_get_queue_info(struct hns3_hw *hw)
845 {
846 #define HNS3VF_TQPS_RSS_INFO_LEN        6
847         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
848         int ret;
849
850         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
851                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
852         if (ret) {
853                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
854                 return ret;
855         }
856
857         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
858         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
859         memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
860
861         return hns3vf_check_tqp_info(hw);
862 }
863
864 static int
865 hns3vf_get_queue_depth(struct hns3_hw *hw)
866 {
867 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
868         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
869         int ret;
870
871         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
872                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
873         if (ret) {
874                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
875                              ret);
876                 return ret;
877         }
878
879         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
880         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
881
882         return 0;
883 }
884
885 static int
886 hns3vf_get_tc_info(struct hns3_hw *hw)
887 {
888         uint8_t resp_msg;
889         int ret;
890
891         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
892                                 true, &resp_msg, sizeof(resp_msg));
893         if (ret) {
894                 hns3_err(hw, "VF request to get TC info from PF failed %d",
895                          ret);
896                 return ret;
897         }
898
899         hw->hw_tc_map = resp_msg;
900
901         return 0;
902 }
903
904 static int
905 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
906 {
907         uint8_t host_mac[RTE_ETHER_ADDR_LEN];
908         int ret;
909
910         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0,
911                                 true, host_mac, RTE_ETHER_ADDR_LEN);
912         if (ret) {
913                 hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
914                 return ret;
915         }
916
917         memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
918
919         return 0;
920 }
921
922 static int
923 hns3vf_get_configuration(struct hns3_hw *hw)
924 {
925         int ret;
926
927         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
928
929         /* Get queue configuration from PF */
930         ret = hns3vf_get_queue_info(hw);
931         if (ret)
932                 return ret;
933
934         /* Get queue depth info from PF */
935         ret = hns3vf_get_queue_depth(hw);
936         if (ret)
937                 return ret;
938
939         /* Get user defined VF MAC addr from PF */
940         ret = hns3vf_get_host_mac_addr(hw);
941         if (ret)
942                 return ret;
943
944         /* Get tc configuration from PF */
945         return hns3vf_get_tc_info(hw);
946 }
947
948 static int
949 hns3vf_set_tc_info(struct hns3_adapter *hns)
950 {
951         struct hns3_hw *hw = &hns->hw;
952         uint16_t nb_rx_q = hw->data->nb_rx_queues;
953         uint16_t nb_tx_q = hw->data->nb_tx_queues;
954         uint8_t i;
955
956         hw->num_tc = 0;
957         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
958                 if (hw->hw_tc_map & BIT(i))
959                         hw->num_tc++;
960
961         if (nb_rx_q < hw->num_tc) {
962                 hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
963                          nb_rx_q, hw->num_tc);
964                 return -EINVAL;
965         }
966
967         if (nb_tx_q < hw->num_tc) {
968                 hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
969                          nb_tx_q, hw->num_tc);
970                 return -EINVAL;
971         }
972
973         hns3_set_rss_size(hw, nb_rx_q);
974         hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
975
976         return 0;
977 }
978
979 static void
980 hns3vf_request_link_info(struct hns3_hw *hw)
981 {
982         uint8_t resp_msg;
983         int ret;
984
985         if (rte_atomic16_read(&hw->reset.resetting))
986                 return;
987         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
988                                 &resp_msg, sizeof(resp_msg));
989         if (ret)
990                 hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
991 }
992
993 static int
994 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
995 {
996 #define HNS3VF_VLAN_MBX_MSG_LEN 5
997         struct hns3_hw *hw = &hns->hw;
998         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
999         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
1000         uint8_t is_kill = on ? 0 : 1;
1001
1002         msg_data[0] = is_kill;
1003         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1004         memcpy(&msg_data[3], &proto, sizeof(proto));
1005
1006         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
1007                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
1008                                  0);
1009 }
1010
1011 static int
1012 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1013 {
1014         struct hns3_adapter *hns = dev->data->dev_private;
1015         struct hns3_hw *hw = &hns->hw;
1016         int ret;
1017
1018         if (rte_atomic16_read(&hw->reset.resetting)) {
1019                 hns3_err(hw,
1020                          "vf set vlan id failed during resetting, vlan_id =%u",
1021                          vlan_id);
1022                 return -EIO;
1023         }
1024         rte_spinlock_lock(&hw->lock);
1025         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1026         rte_spinlock_unlock(&hw->lock);
1027         if (ret)
1028                 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1029                          vlan_id, ret);
1030
1031         return ret;
1032 }
1033
1034 static int
1035 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1036 {
1037         uint8_t msg_data;
1038         int ret;
1039
1040         msg_data = enable ? 1 : 0;
1041         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
1042                                 &msg_data, sizeof(msg_data), false, NULL, 0);
1043         if (ret)
1044                 hns3_err(hw, "vf enable strip failed, ret =%d", ret);
1045
1046         return ret;
1047 }
1048
1049 static int
1050 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1051 {
1052         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1053         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1054         unsigned int tmp_mask;
1055
1056         tmp_mask = (unsigned int)mask;
1057         /* Vlan stripping setting */
1058         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
1059                 rte_spinlock_lock(&hw->lock);
1060                 /* Enable or disable VLAN stripping */
1061                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1062                         hns3vf_en_hw_strip_rxvtag(hw, true);
1063                 else
1064                         hns3vf_en_hw_strip_rxvtag(hw, false);
1065                 rte_spinlock_unlock(&hw->lock);
1066         }
1067
1068         return 0;
1069 }
1070
/*
 * Walk the software VLAN filter bitmap (hw->data->vlan_filter_conf)
 * and add (on != 0) or remove (on == 0) every recorded VLAN id in
 * hardware via the PF mailbox. Stops and returns the error code on
 * the first failed mailbox request; returns 0 otherwise.
 */
static int
hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
{
	struct rte_vlan_filter_conf *vfc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vlan_id;
	uint64_t vbit;
	uint64_t ids;
	int ret = 0;
	uint32_t i;

	vfc = &hw->data->vlan_filter_conf;
	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;
		ids = vfc->ids[i];
		while (ids) {
			/*
			 * 64 means the num bits of ids, one bit corresponds to
			 * one vlan id
			 */
			vlan_id = 64 * i;
			/*
			 * count trailing zeroes: vbit becomes a mask of all
			 * bits strictly below the lowest set bit of ids
			 */
			vbit = ~ids & (ids - 1);
			/* clear least significant bit set */
			ids ^= (ids ^ (ids - 1)) ^ vbit;
			/* width of vbit == index of the bit just cleared */
			for (; vbit;) {
				vbit >>= 1;
				vlan_id++;
			}
			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
			if (ret) {
				hns3_err(hw,
					 "VF handle vlan table failed, ret =%d, on = %d",
					 ret, on);
				return ret;
			}
		}
	}

	return ret;
}
1113
/* Remove every VLAN id recorded in the software filter table from HW. */
static int
hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
{
	return hns3vf_handle_all_vlan_table(hns, 0);
}
1119
1120 static int
1121 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1122 {
1123         struct hns3_hw *hw = &hns->hw;
1124         struct rte_eth_conf *dev_conf;
1125         bool en;
1126         int ret;
1127
1128         dev_conf = &hw->data->dev_conf;
1129         en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
1130                                                                    : false;
1131         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1132         if (ret)
1133                 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1134                          ret);
1135         return ret;
1136 }
1137
1138 static int
1139 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1140 {
1141         struct hns3_adapter *hns = dev->data->dev_private;
1142         struct rte_eth_dev_data *data = dev->data;
1143         struct hns3_hw *hw = &hns->hw;
1144         int ret;
1145
1146         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1147             data->dev_conf.txmode.hw_vlan_reject_untagged ||
1148             data->dev_conf.txmode.hw_vlan_insert_pvid) {
1149                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1150                               "or hw_vlan_insert_pvid is not support!");
1151         }
1152
1153         /* Apply vlan offload setting */
1154         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1155         if (ret)
1156                 hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);
1157
1158         return ret;
1159 }
1160
1161 static int
1162 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1163 {
1164         uint8_t msg_data;
1165
1166         msg_data = alive ? 1 : 0;
1167         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
1168                                  sizeof(msg_data), false, NULL, 0);
1169 }
1170
1171 static void
1172 hns3vf_keep_alive_handler(void *param)
1173 {
1174         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1175         struct hns3_adapter *hns = eth_dev->data->dev_private;
1176         struct hns3_hw *hw = &hns->hw;
1177         uint8_t respmsg;
1178         int ret;
1179
1180         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
1181                                 false, &respmsg, sizeof(uint8_t));
1182         if (ret)
1183                 hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
1184                          ret);
1185
1186         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1187                           eth_dev);
1188 }
1189
1190 static void
1191 hns3vf_service_handler(void *param)
1192 {
1193         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1194         struct hns3_adapter *hns = eth_dev->data->dev_private;
1195         struct hns3_hw *hw = &hns->hw;
1196
1197         /*
1198          * The query link status and reset processing are executed in the
1199          * interrupt thread.When the IMP reset occurs, IMP will not respond,
1200          * and the query operation will time out after 30ms. In the case of
1201          * multiple PF/VFs, each query failure timeout causes the IMP reset
1202          * interrupt to fail to respond within 100ms.
1203          * Before querying the link status, check whether there is a reset
1204          * pending, and if so, abandon the query.
1205          */
1206         if (!hns3vf_is_reset_pending(hns))
1207                 hns3vf_request_link_info(hw);
1208         else
1209                 hns3_warn(hw, "Cancel the query when reset is pending");
1210
1211         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1212                           eth_dev);
1213 }
1214
1215 static int
1216 hns3_query_vf_resource(struct hns3_hw *hw)
1217 {
1218         struct hns3_vf_res_cmd *req;
1219         struct hns3_cmd_desc desc;
1220         uint16_t num_msi;
1221         int ret;
1222
1223         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1224         ret = hns3_cmd_send(hw, &desc, 1);
1225         if (ret) {
1226                 hns3_err(hw, "query vf resource failed, ret = %d", ret);
1227                 return ret;
1228         }
1229
1230         req = (struct hns3_vf_res_cmd *)desc.data;
1231         num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1232                                  HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
1233         if (num_msi < HNS3_MIN_VECTOR_NUM) {
1234                 hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1235                          num_msi, HNS3_MIN_VECTOR_NUM);
1236                 return -EINVAL;
1237         }
1238
1239         hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
1240
1241         return 0;
1242 }
1243
1244 static int
1245 hns3vf_init_hardware(struct hns3_adapter *hns)
1246 {
1247         struct hns3_hw *hw = &hns->hw;
1248         uint16_t mtu = hw->data->mtu;
1249         int ret;
1250
1251         ret = hns3vf_set_promisc_mode(hw, true);
1252         if (ret)
1253                 return ret;
1254
1255         ret = hns3vf_config_mtu(hw, mtu);
1256         if (ret)
1257                 goto err_init_hardware;
1258
1259         ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1260         if (ret) {
1261                 PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1262                 goto err_init_hardware;
1263         }
1264
1265         ret = hns3_config_gro(hw, false);
1266         if (ret) {
1267                 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1268                 goto err_init_hardware;
1269         }
1270
1271         ret = hns3vf_set_alive(hw, true);
1272         if (ret) {
1273                 PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
1274                 goto err_init_hardware;
1275         }
1276
1277         hns3vf_request_link_info(hw);
1278         return 0;
1279
1280 err_init_hardware:
1281         (void)hns3vf_set_promisc_mode(hw, false);
1282         return ret;
1283 }
1284
/*
 * Ask the PF to clear this VF's vport list table (no payload, no
 * response awaited), dropping entries left by a previous driver run.
 */
static int
hns3vf_clear_vport_list(struct hns3_hw *hw)
{
	return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL,
				 HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false,
				 NULL, 0);
}
1292
/*
 * One-time VF initialization: map BAR2, bring up the firmware command
 * queue, register and enable the vector0 interrupt, fetch configuration
 * from the PF and program the hardware defaults. On failure everything
 * acquired so far is released in reverse order via the goto chain.
 */
static int
hns3vf_init_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	/* Get VF resource */
	ret = hns3_query_vf_resource(hw);
	if (ret)
		goto err_cmd_init;

	rte_spinlock_init(&hw->mbx_resp.lock);

	/* Drop any stale event state before enabling the interrupt. */
	hns3vf_clear_event_cause(hw, 0);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 hns3vf_interrupt_handler, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	/* Enable interrupt */
	rte_intr_enable(&pci_dev->intr_handle);
	hns3vf_enable_irq0(hw);

	/* Get configuration from PF */
	ret = hns3vf_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	/*
	 * The hns3 PF ethdev driver in kernel support setting VF MAC address
	 * on the host by "ip link set ..." command. To avoid some incorrect
	 * scenes, for example, hns3 VF PMD driver fails to receive and send
	 * packets after user configure the MAC address by using the
	 * "ip link set ..." command, hns3 VF PMD driver keep the same MAC
	 * address strategy as the hns3 kernel ethdev driver in the
	 * initialization. If user configure a MAC address by the ip command
	 * for VF device, then hns3 VF PMD driver will start with it, otherwise
	 * start with a random MAC address in the initialization.
	 */
	ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
	if (ret)
		rte_eth_random_addr(hw->mac.mac_addr);

	ret = hns3vf_clear_vport_list(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
		goto err_get_config;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret)
		goto err_get_config;

	hns3_set_default_rss_args(hw);

	/*
	 * In the initialization clearing the all hardware mapping relationship
	 * configurations between queues and interrupt vectors is needed, so
	 * some error caused by the residual configurations, such as the
	 * unexpected interrupt, can be avoid.
	 */
	ret = hns3vf_init_ring_with_vector(hw);
	if (ret)
		goto err_get_config;

	return 0;

err_get_config:
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
	hns3_cmd_uninit(hw);

err_cmd_init:
	hns3_cmd_destroy_queue(hw);

err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
1402
/*
 * Undo hns3vf_init_vf(): release RSS state, notify the PF that the VF
 * is going away, tear down the vector0 interrupt and the firmware
 * command queue, and clear the BAR mapping. Order mirrors init in
 * reverse.
 */
static void
hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_rss_uninit(hns);
	/* Best-effort notifications to the PF; failures are ignored. */
	(void)hns3vf_set_alive(hw, false);
	(void)hns3vf_set_promisc_mode(hw, false);
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
1423
1424 static int
1425 hns3vf_do_stop(struct hns3_adapter *hns)
1426 {
1427         struct hns3_hw *hw = &hns->hw;
1428         bool reset_queue;
1429
1430         hw->mac.link_status = ETH_LINK_DOWN;
1431
1432         if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
1433                 hns3vf_configure_mac_addr(hns, true);
1434                 reset_queue = true;
1435         } else
1436                 reset_queue = false;
1437         return hns3_stop_queues(hns, reset_queue);
1438 }
1439
1440 static void
1441 hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
1442 {
1443         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1444         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1445         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1446         uint8_t base = 0;
1447         uint8_t vec = 0;
1448         uint16_t q_id;
1449
1450         if (dev->data->dev_conf.intr_conf.rxq == 0)
1451                 return;
1452
1453         /* unmap the ring with vector */
1454         if (rte_intr_allow_others(intr_handle)) {
1455                 vec = RTE_INTR_VEC_RXTX_OFFSET;
1456                 base = RTE_INTR_VEC_RXTX_OFFSET;
1457         }
1458         if (rte_intr_dp_is_en(intr_handle)) {
1459                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1460                         (void)hns3vf_bind_ring_with_vector(hw, vec, false,
1461                                                            HNS3_RING_TYPE_RX,
1462                                                            q_id);
1463                         if (vec < base + intr_handle->nb_efd - 1)
1464                                 vec++;
1465                 }
1466         }
1467         /* Clean datapath event and queue/vec mapping */
1468         rte_intr_efd_disable(intr_handle);
1469         if (intr_handle->intr_vec) {
1470                 rte_free(intr_handle->intr_vec);
1471                 intr_handle->intr_vec = NULL;
1472         }
1473 }
1474
/*
 * .dev_stop ops implementation. Quiesces the datapath (including on
 * secondary processes) before touching the hardware, then stops the
 * queues unless a reset is already handling the teardown.
 */
static void
hns3vf_dev_stop(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	hns3_set_rxtx_function(dev);
	/* Make the state/burst-function change visible to other lcores. */
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	/* If a reset is in progress it owns the hardware teardown. */
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		hns3vf_do_stop(hns);
		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
	rte_spinlock_unlock(&hw->lock);

	hns3vf_unmap_rx_interrupt(dev);
}
1502
/*
 * .dev_close ops implementation (primary process only): stop the port
 * if running, abort any in-flight reset, then release MC MAC/VLAN state
 * on the PF and all driver-owned resources.
 */
static void
hns3vf_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	if (hw->adapter_state == HNS3_NIC_STARTED)
		hns3vf_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;
	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
	/* NOTE(review): second arg presumably means "remove" here, matching
	 * the teardown context — confirm against the helper's definition.
	 */
	hns3vf_configure_all_mc_mac_addr(hns, true);
	hns3vf_remove_all_vlan_table(hns);
	hns3vf_uninit_vf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_mp_uninit_primary();
	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
1529
1530 static int
1531 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1532                        __rte_unused int wait_to_complete)
1533 {
1534         struct hns3_adapter *hns = eth_dev->data->dev_private;
1535         struct hns3_hw *hw = &hns->hw;
1536         struct hns3_mac *mac = &hw->mac;
1537         struct rte_eth_link new_link;
1538
1539         memset(&new_link, 0, sizeof(new_link));
1540         switch (mac->link_speed) {
1541         case ETH_SPEED_NUM_10M:
1542         case ETH_SPEED_NUM_100M:
1543         case ETH_SPEED_NUM_1G:
1544         case ETH_SPEED_NUM_10G:
1545         case ETH_SPEED_NUM_25G:
1546         case ETH_SPEED_NUM_40G:
1547         case ETH_SPEED_NUM_50G:
1548         case ETH_SPEED_NUM_100G:
1549                 new_link.link_speed = mac->link_speed;
1550                 break;
1551         default:
1552                 new_link.link_speed = ETH_SPEED_NUM_100M;
1553                 break;
1554         }
1555
1556         new_link.link_duplex = mac->link_duplex;
1557         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
1558         new_link.link_autoneg =
1559             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
1560
1561         return rte_eth_linkstatus_set(eth_dev, &new_link);
1562 }
1563
1564 static int
1565 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1566 {
1567         struct hns3_hw *hw = &hns->hw;
1568         int ret;
1569
1570         ret = hns3vf_set_tc_info(hns);
1571         if (ret)
1572                 return ret;
1573
1574         ret = hns3_start_queues(hns, reset_queue);
1575         if (ret) {
1576                 hns3_err(hw, "Failed to start queues: %d", ret);
1577                 return ret;
1578         }
1579
1580         return 0;
1581 }
1582
1583 static int
1584 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
1585 {
1586         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1587         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1588         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1589         uint32_t intr_vector;
1590         uint8_t base = 0;
1591         uint8_t vec = 0;
1592         uint16_t q_id;
1593         int ret;
1594
1595         if (dev->data->dev_conf.intr_conf.rxq == 0)
1596                 return 0;
1597
1598         /* disable uio/vfio intr/eventfd mapping */
1599         rte_intr_disable(intr_handle);
1600
1601         /* check and configure queue intr-vector mapping */
1602         if (rte_intr_cap_multiple(intr_handle) ||
1603             !RTE_ETH_DEV_SRIOV(dev).active) {
1604                 intr_vector = hw->used_rx_queues;
1605                 /* It creates event fd for each intr vector when MSIX is used */
1606                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1607                         return -EINVAL;
1608         }
1609         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1610                 intr_handle->intr_vec =
1611                         rte_zmalloc("intr_vec",
1612                                     hw->used_rx_queues * sizeof(int), 0);
1613                 if (intr_handle->intr_vec == NULL) {
1614                         hns3_err(hw, "Failed to allocate %d rx_queues"
1615                                      " intr_vec", hw->used_rx_queues);
1616                         ret = -ENOMEM;
1617                         goto vf_alloc_intr_vec_error;
1618                 }
1619         }
1620
1621         if (rte_intr_allow_others(intr_handle)) {
1622                 vec = RTE_INTR_VEC_RXTX_OFFSET;
1623                 base = RTE_INTR_VEC_RXTX_OFFSET;
1624         }
1625         if (rte_intr_dp_is_en(intr_handle)) {
1626                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
1627                         ret = hns3vf_bind_ring_with_vector(hw, vec, true,
1628                                                            HNS3_RING_TYPE_RX,
1629                                                            q_id);
1630                         if (ret)
1631                                 goto vf_bind_vector_error;
1632                         intr_handle->intr_vec[q_id] = vec;
1633                         if (vec < base + intr_handle->nb_efd - 1)
1634                                 vec++;
1635                 }
1636         }
1637         rte_intr_enable(intr_handle);
1638         return 0;
1639
1640 vf_bind_vector_error:
1641         rte_intr_efd_disable(intr_handle);
1642         if (intr_handle->intr_vec) {
1643                 free(intr_handle->intr_vec);
1644                 intr_handle->intr_vec = NULL;
1645         }
1646         return ret;
1647 vf_alloc_intr_vec_error:
1648         rte_intr_efd_disable(intr_handle);
1649         return ret;
1650 }
1651
/*
 * .dev_start callback: bring the VF port up.
 *
 * Refuses with -EBUSY while a reset is in progress. Under hw->lock the
 * adapter transitions STARTING -> STARTED via hns3vf_do_start(); outside
 * the lock the Rx interrupts are mapped, the real Rx/Tx burst functions
 * are installed (also on secondary processes) and the periodic service
 * alarm is armed.
 */
static int
hns3vf_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_atomic16_read(&hw->reset.resetting))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;
	ret = hns3vf_do_start(hns, true);
	if (ret) {
		/* Roll the state back so a later start can be retried. */
		hw->adapter_state = HNS3_NIC_CONFIGURED;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}
	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	ret = hns3vf_map_rx_interrupt(dev);
	if (ret)
		/*
		 * NOTE(review): on this failure path adapter_state remains
		 * HNS3_NIC_STARTED although the burst functions were never
		 * installed — consider rolling back to HNS3_NIC_CONFIGURED;
		 * verify against the stop/close paths.
		 */
		return ret;
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);

	/* ret is 0 here. */
	return ret;
}
1683
1684 static bool
1685 is_vf_reset_done(struct hns3_hw *hw)
1686 {
1687 #define HNS3_FUN_RST_ING_BITS \
1688         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1689          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1690          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1691          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1692
1693         uint32_t val;
1694
1695         if (hw->reset.level == HNS3_VF_RESET) {
1696                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1697                 if (val & HNS3_VF_RST_ING_BIT)
1698                         return false;
1699         } else {
1700                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1701                 if (val & HNS3_FUN_RST_ING_BITS)
1702                         return false;
1703         }
1704         return true;
1705 }
1706
1707 bool
1708 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1709 {
1710         struct hns3_hw *hw = &hns->hw;
1711         enum hns3_reset_level reset;
1712
1713         hns3vf_check_event_cause(hns, NULL);
1714         reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
1715         if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
1716                 hns3_warn(hw, "High level reset %d is pending", reset);
1717                 return true;
1718         }
1719         return false;
1720 }
1721
/*
 * Reset-flow "wait hardware ready" step, driven by the alarm-based state
 * machine in hw->reset.wait_data (polled via hns3_wait_callback()).
 *
 * Returns 0 when the hardware is ready, -EAGAIN while polling must
 * continue (an alarm has been armed to re-check), or -ETIME when the
 * poll budget (HNS3VF_RESET_WAIT_CNT * HNS3VF_RESET_WAIT_MS) expired.
 */
static int
hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS) {
		/*
		 * After vf reset is ready, the PF may not have completed
		 * the reset processing. The vf sending mbox to PF may fail
		 * during the pf reset, so it is better to add extra delay.
		 */
		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
		    hw->reset.level == HNS3_FLR_RESET)
			return 0;
		/* Reset retry process, no need to add extra delay. */
		if (hw->reset.attempts)
			return 0;
		if (wait_data->check_completion == NULL)
			return 0;

		/* Arm a one-shot 1 s delay before declaring success. */
		wait_data->check_completion = NULL;
		wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
		wait_data->count = 1;
		wait_data->result = HNS3_WAIT_REQUEST;
		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
				  wait_data);
		hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
		return -EAGAIN;
	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		gettimeofday(&tv, NULL);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	/* First entry: start polling is_vf_reset_done() via the alarm. */
	wait_data->hns = hns;
	wait_data->check_completion = is_vf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
				      HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3VF_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}
1770
1771 static int
1772 hns3vf_prepare_reset(struct hns3_adapter *hns)
1773 {
1774         struct hns3_hw *hw = &hns->hw;
1775         int ret = 0;
1776
1777         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1778                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1779                                         0, true, NULL, 0);
1780         }
1781         rte_atomic16_set(&hw->reset.disable_cmd, 1);
1782
1783         return ret;
1784 }
1785
/*
 * Reset-flow "stop service" step: quiesce the port before the hardware
 * reset proceeds.
 *
 * Cancels the periodic service alarm, marks the link down, re-installs
 * the burst functions (presumably dummy ones while resetting — verify in
 * hns3_rxtx.c), fences with rte_wmb() before telling secondary processes
 * to stop, waits briefly for in-flight bursts, then stops the datapath
 * under hw->lock.
 */
static int
hns3vf_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	if (hw->adapter_state == HNS3_NIC_STARTED)
		rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hw->mac.link_status = ETH_LINK_DOWN;

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	/* Heuristic drain delay: ~1 ms per TQP for in-flight bursts. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (hw->adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3vf_do_stop(hns);
		/* Defer mbuf release to the restore stage after reset. */
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries.
	 */
	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
		hns3vf_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
1822
1823 static int
1824 hns3vf_start_service(struct hns3_adapter *hns)
1825 {
1826         struct hns3_hw *hw = &hns->hw;
1827         struct rte_eth_dev *eth_dev;
1828
1829         eth_dev = &rte_eth_devices[hw->data->port_id];
1830         hns3_set_rxtx_function(eth_dev);
1831         hns3_mp_req_start_rxtx(eth_dev);
1832         if (hw->adapter_state == HNS3_NIC_STARTED)
1833                 hns3vf_service_handler(eth_dev);
1834
1835         return 0;
1836 }
1837
1838 static int
1839 hns3vf_check_default_mac_change(struct hns3_hw *hw)
1840 {
1841         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1842         struct rte_ether_addr *hw_mac;
1843         int ret;
1844
1845         /*
1846          * The hns3 PF ethdev driver in kernel support setting VF MAC address
1847          * on the host by "ip link set ..." command. If the hns3 PF kernel
1848          * ethdev driver sets the MAC address for VF device after the
1849          * initialization of the related VF device, the PF driver will notify
1850          * VF driver to reset VF device to make the new MAC address effective
1851          * immediately. The hns3 VF PMD driver should check whether the MAC
1852          * address has been changed by the PF kernel ethdev driver, if changed
1853          * VF driver should configure hardware using the new MAC address in the
1854          * recovering hardware configuration stage of the reset process.
1855          */
1856         ret = hns3vf_get_host_mac_addr(hw);
1857         if (ret)
1858                 return ret;
1859
1860         hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
1861         ret = rte_is_zero_ether_addr(hw_mac);
1862         if (ret) {
1863                 rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
1864         } else {
1865                 ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
1866                 if (!ret) {
1867                         rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
1868                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1869                                               &hw->data->mac_addrs[0]);
1870                         hns3_warn(hw, "Default MAC address has been changed to:"
1871                                   " %s by the host PF kernel ethdev driver",
1872                                   mac_str);
1873                 }
1874         }
1875
1876         return 0;
1877 }
1878
1879 static int
1880 hns3vf_restore_conf(struct hns3_adapter *hns)
1881 {
1882         struct hns3_hw *hw = &hns->hw;
1883         int ret;
1884
1885         ret = hns3vf_check_default_mac_change(hw);
1886         if (ret)
1887                 return ret;
1888
1889         ret = hns3vf_configure_mac_addr(hns, false);
1890         if (ret)
1891                 return ret;
1892
1893         ret = hns3vf_configure_all_mc_mac_addr(hns, false);
1894         if (ret)
1895                 goto err_mc_mac;
1896
1897         ret = hns3vf_restore_vlan_conf(hns);
1898         if (ret)
1899                 goto err_vlan_table;
1900
1901         if (hw->adapter_state == HNS3_NIC_STARTED) {
1902                 ret = hns3vf_do_start(hns, false);
1903                 if (ret)
1904                         goto err_vlan_table;
1905                 hns3_info(hw, "hns3vf dev restart successful!");
1906         } else if (hw->adapter_state == HNS3_NIC_STOPPING)
1907                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1908         return 0;
1909
1910 err_vlan_table:
1911         hns3vf_configure_all_mc_mac_addr(hns, true);
1912 err_mc_mac:
1913         hns3vf_configure_mac_addr(hns, true);
1914         return ret;
1915 }
1916
1917 static enum hns3_reset_level
1918 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
1919 {
1920         enum hns3_reset_level reset_level;
1921
1922         /* return the highest priority reset level amongst all */
1923         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
1924                 reset_level = HNS3_VF_RESET;
1925         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
1926                 reset_level = HNS3_VF_FULL_RESET;
1927         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
1928                 reset_level = HNS3_VF_PF_FUNC_RESET;
1929         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
1930                 reset_level = HNS3_VF_FUNC_RESET;
1931         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
1932                 reset_level = HNS3_FLR_RESET;
1933         else
1934                 reset_level = HNS3_NONE_RESET;
1935
1936         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
1937                 return HNS3_NONE_RESET;
1938
1939         return reset_level;
1940 }
1941
1942 static void
1943 hns3vf_reset_service(void *param)
1944 {
1945         struct hns3_adapter *hns = (struct hns3_adapter *)param;
1946         struct hns3_hw *hw = &hns->hw;
1947         enum hns3_reset_level reset_level;
1948         struct timeval tv_delta;
1949         struct timeval tv_start;
1950         struct timeval tv;
1951         uint64_t msec;
1952
1953         /*
1954          * The interrupt is not triggered within the delay time.
1955          * The interrupt may have been lost. It is necessary to handle
1956          * the interrupt to recover from the error.
1957          */
1958         if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
1959                 rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
1960                 hns3_err(hw, "Handling interrupts in delayed tasks");
1961                 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
1962                 reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
1963                 if (reset_level == HNS3_NONE_RESET) {
1964                         hns3_err(hw, "No reset level is set, try global reset");
1965                         hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
1966                 }
1967         }
1968         rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
1969
1970         /*
1971          * Hardware reset has been notified, we now have to poll & check if
1972          * hardware has actually completed the reset sequence.
1973          */
1974         reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
1975         if (reset_level != HNS3_NONE_RESET) {
1976                 gettimeofday(&tv_start, NULL);
1977                 hns3_reset_process(hns, reset_level);
1978                 gettimeofday(&tv, NULL);
1979                 timersub(&tv, &tv_start, &tv_delta);
1980                 msec = tv_delta.tv_sec * MSEC_PER_SEC +
1981                        tv_delta.tv_usec / USEC_PER_MSEC;
1982                 if (msec > HNS3_RESET_PROCESS_MS)
1983                         hns3_err(hw, "%d handle long time delta %" PRIx64
1984                                  " ms time=%ld.%.6ld",
1985                                  hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
1986         }
1987 }
1988
/*
 * Reset-flow "reinit dev" step: re-initialize the firmware command
 * channel, interrupts (for a full VF reset) and all queues after the
 * hardware reset has completed.
 */
static int
hns3vf_reinit_dev(struct hns3_adapter *hns)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_hw *hw = &hns->hw;
	int ret;

	if (hw->reset.level == HNS3_VF_FULL_RESET) {
		/* A full reset cleared PCI config: restore bus mastering. */
		rte_intr_disable(&pci_dev->intr_handle);
		hns3vf_set_bus_master(pci_dev, true);
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		hns3_err(hw, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	if (hw->reset.level == HNS3_VF_FULL_RESET) {
		/*
		 * UIO enables msix by writing the pcie configuration space
		 * vfio_pci enables msix in rte_intr_enable.
		 */
		if (pci_dev->kdrv == RTE_KDRV_IGB_UIO ||
		    pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) {
			if (hns3vf_enable_msix(pci_dev, true))
				hns3_err(hw, "Failed to enable msix");
		}

		rte_intr_enable(&pci_dev->intr_handle);
	}

	ret = hns3_reset_all_queues(hns);
	if (ret) {
		hns3_err(hw, "Failed to reset all queues: %d", ret);
		goto err_init;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret) {
		hns3_err(hw, "Failed to init hardware: %d", ret);
		goto err_init;
	}

	return 0;

err_cmd_init:
	/*
	 * NOTE(review): bus mastering is disabled here even when it was not
	 * enabled above (reset level != HNS3_VF_FULL_RESET), and control
	 * falls through to hns3_cmd_uninit() after a failed hns3_cmd_init()
	 * — confirm both behaviors are intentional.
	 */
	hns3vf_set_bus_master(pci_dev, false);
err_init:
	hns3_cmd_uninit(hw);
	return ret;
}
2043
/* VF ethdev callback table registered with the rte_ethdev layer. */
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_start          = hns3vf_dev_start,
	.dev_stop           = hns3vf_dev_stop,
	.dev_close          = hns3vf_dev_close,
	.mtu_set            = hns3vf_dev_mtu_set,
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get      = hns3vf_dev_infos_get,
	.rx_queue_setup     = hns3_rx_queue_setup,
	.tx_queue_setup     = hns3_tx_queue_setup,
	.rx_queue_release   = hns3_dev_rx_queue_release,
	.tx_queue_release   = hns3_dev_tx_queue_release,
	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
	.dev_configure      = hns3vf_dev_configure,
	.mac_addr_add       = hns3vf_add_mac_addr,
	.mac_addr_remove    = hns3vf_remove_mac_addr,
	.mac_addr_set       = hns3vf_set_default_mac_addr,
	.set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
	.link_update        = hns3vf_dev_link_update,
	.rss_hash_update    = hns3_dev_rss_hash_update,
	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
	.reta_update        = hns3_dev_rss_reta_update,
	.reta_query         = hns3_dev_rss_reta_query,
	.filter_ctrl        = hns3_dev_filter_ctrl,
	.vlan_filter_set    = hns3vf_vlan_filter_set,
	.vlan_offload_set   = hns3vf_vlan_offload_set,
	.get_reg            = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
2079
/* VF hooks invoked by the shared reset state machine (hns3_intr.c). */
static const struct hns3_reset_ops hns3vf_reset_ops = {
	.reset_service       = hns3vf_reset_service,
	.stop_service        = hns3vf_stop_service,
	.prepare_reset       = hns3vf_prepare_reset,
	.wait_hardware_ready = hns3vf_wait_hardware_ready,
	.reinit_dev          = hns3vf_reinit_dev,
	.restore_conf        = hns3vf_restore_conf,
	.start_service       = hns3vf_start_service,
};
2089
/*
 * VF ethdev init callback (invoked from eth_hns3vf_pci_probe()).
 *
 * Secondary process: only installs burst functions / dev_ops and returns.
 * Primary process: additionally sets up flow filter lists, the reset
 * machinery, the VF itself, MAC address storage and the keep-alive alarm.
 * On error all acquired resources are released in reverse order.
 */
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_init_secondary();
		hw->secondary_cnt++;
		return 0;
	}

	hns3_mp_init_primary();

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3vf_reset_ops;

	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
		/* A reset arrived during init: run it now that we are ready. */
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}
	/* Periodically tell the PF this VF is alive. */
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;

	return ret;
}
2185
2186 static int
2187 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2188 {
2189         struct hns3_adapter *hns = eth_dev->data->dev_private;
2190         struct hns3_hw *hw = &hns->hw;
2191
2192         PMD_INIT_FUNC_TRACE();
2193
2194         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2195                 return -EPERM;
2196
2197         eth_dev->dev_ops = NULL;
2198         eth_dev->rx_pkt_burst = NULL;
2199         eth_dev->tx_pkt_burst = NULL;
2200         eth_dev->tx_pkt_prepare = NULL;
2201
2202         if (hw->adapter_state < HNS3_NIC_CLOSING)
2203                 hns3vf_dev_close(eth_dev);
2204
2205         hw->adapter_state = HNS3_NIC_REMOVED;
2206         return 0;
2207 }
2208
2209 static int
2210 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2211                      struct rte_pci_device *pci_dev)
2212 {
2213         return rte_eth_dev_pci_generic_probe(pci_dev,
2214                                              sizeof(struct hns3_adapter),
2215                                              hns3vf_dev_init);
2216 }
2217
2218 static int
2219 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2220 {
2221         return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2222 }
2223
/* PCI device IDs of Hisilicon VF devices claimed by this PMD. */
static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* PCI driver descriptor registered with the DPDK PCI bus below. */
static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");