net/hns3: support device reset
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <arpa/inet.h>
12 #include <rte_alarm.h>
13 #include <rte_atomic.h>
14 #include <rte_bus_pci.h>
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18 #include <rte_dev.h>
19 #include <rte_eal.h>
20 #include <rte_ether.h>
21 #include <rte_ethdev_driver.h>
22 #include <rte_ethdev_pci.h>
23 #include <rte_interrupts.h>
24 #include <rte_io.h>
25 #include <rte_log.h>
26 #include <rte_pci.h>
27
28 #include "hns3_ethdev.h"
29 #include "hns3_logs.h"
30 #include "hns3_rxtx.h"
31 #include "hns3_regs.h"
32 #include "hns3_intr.h"
33 #include "hns3_dcb.h"
34
35 #define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
36 #define HNS3VF_SERVICE_INTERVAL         1000000 /* us */
37
38 #define HNS3VF_RESET_WAIT_MS    20
39 #define HNS3VF_RESET_WAIT_CNT   2000
40
41 /* Reset related Registers */
42 #define HNS3_GLOBAL_RESET_BIT           0
43 #define HNS3_CORE_RESET_BIT             1
44 #define HNS3_IMP_RESET_BIT              2
45 #define HNS3_FUN_RST_ING_B              0
46
/* Event causes decoded from the vector 0 (misc) interrupt status register. */
enum hns3vf_evt_cause {
	HNS3VF_VECTOR0_EVENT_RST,   /* reset interrupt pending */
	HNS3VF_VECTOR0_EVENT_MBX,   /* PF-to-VF mailbox (CMDQ RX) message pending */
	HNS3VF_VECTOR0_EVENT_OTHER, /* none of the above */
};
52
53 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
54                                                     uint64_t *levels);
55 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
56 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
57
58 static int
59 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
60                     __attribute__ ((unused)) uint32_t idx,
61                     __attribute__ ((unused)) uint32_t pool)
62 {
63         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
64         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
65         int ret;
66
67         rte_spinlock_lock(&hw->lock);
68         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
69                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
70                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
71         rte_spinlock_unlock(&hw->lock);
72         if (ret) {
73                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
74                                       mac_addr);
75                 hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
76                          ret);
77         }
78
79         return ret;
80 }
81
82 static void
83 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
84 {
85         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
86         /* index will be checked by upper level rte interface */
87         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
88         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
89         int ret;
90
91         rte_spinlock_lock(&hw->lock);
92         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
93                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
94                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
95                                 NULL, 0);
96         rte_spinlock_unlock(&hw->lock);
97         if (ret) {
98                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
99                                       mac_addr);
100                 hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
101                          mac_str, ret);
102         }
103 }
104
105 static int
106 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
107                             struct rte_ether_addr *mac_addr)
108 {
109 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
110         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
111         struct rte_ether_addr *old_addr;
112         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
113         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
114         int ret;
115
116         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
117                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
118                                       mac_addr);
119                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
120                          mac_str);
121                 return -EINVAL;
122         }
123
124         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
125         rte_spinlock_lock(&hw->lock);
126         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
127         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
128                RTE_ETHER_ADDR_LEN);
129
130         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
131                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
132                                 HNS3_TWO_ETHER_ADDR_LEN, false, NULL, 0);
133         if (ret) {
134                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
135                                       mac_addr);
136                 hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str,
137                          ret);
138         }
139
140         rte_ether_addr_copy(mac_addr,
141                             (struct rte_ether_addr *)hw->mac.mac_addr);
142         rte_spinlock_unlock(&hw->lock);
143
144         return ret;
145 }
146
147 static int
148 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
149 {
150         struct hns3_hw *hw = &hns->hw;
151         struct rte_ether_addr *addr;
152         enum hns3_mbx_mac_vlan_subcode opcode;
153         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
154         int ret = 0;
155         int i;
156
157         if (del)
158                 opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
159         else
160                 opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
161         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
162                 addr = &hw->data->mac_addrs[i];
163                 if (!rte_is_valid_assigned_ether_addr(addr))
164                         continue;
165                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
166                 hns3_dbg(hw, "rm mac addr: %s", mac_str);
167                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
168                                         addr->addr_bytes, RTE_ETHER_ADDR_LEN,
169                                         false, NULL, 0);
170                 if (ret) {
171                         hns3_err(hw, "Failed to remove mac addr for vf: %d",
172                                  ret);
173                         break;
174                 }
175         }
176         return ret;
177 }
178
179 static int
180 hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
181                        struct rte_ether_addr *mac_addr)
182 {
183         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
184         struct hns3_hw *hw = &hns->hw;
185         int ret;
186
187         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
188                                 HNS3_MBX_MAC_VLAN_MC_ADD,
189                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
190                                 NULL, 0);
191         if (ret) {
192                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
193                                       mac_addr);
194                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
195                          mac_str, ret);
196                 return ret;
197         }
198
199         return 0;
200 }
201
202 static int
203 hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
204                           struct rte_ether_addr *mac_addr)
205 {
206         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
207         struct hns3_hw *hw = &hns->hw;
208         int ret;
209
210         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
211                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
212                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
213                                 NULL, 0);
214         if (ret) {
215                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
216                                       mac_addr);
217                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
218                          mac_str, ret);
219                 return ret;
220         }
221
222         return 0;
223 }
224
/*
 * set_mc_addr_list ethdev op: replace the whole multicast address table.
 * Validates every new address first, then (under hw->lock) removes all
 * currently programmed entries and adds the new set, keeping hw->mc_addrs /
 * hw->mc_addrs_num in sync with what was actually sent to the PF.
 *
 * NOTE(review): on a mid-loop mailbox failure the function returns with the
 * table partially updated; the counters still reflect the entries that
 * succeeded, so state stays consistent, but the caller's list is not fully
 * applied.
 *
 * Returns 0 on success, -EINVAL for a bad list, or the mailbox error code.
 */
static int
hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mc_addr_set,
			    uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	/* Reject lists larger than the hardware table. */
	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -EINVAL;
	}

	/* Validate the whole list before touching hardware state. */
	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw,
				 "Failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}
	}
	rte_spinlock_lock(&hw->lock);
	/* Remove existing entries from the tail so mc_addrs_num stays valid. */
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hns3vf_remove_mc_mac_addr(hns, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	/* Program the new set, mirroring each success into hw->mc_addrs. */
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hns3vf_add_mc_mac_addr(hns, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
288
289 static int
290 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
291 {
292         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
293         struct hns3_hw *hw = &hns->hw;
294         struct rte_ether_addr *addr;
295         int err = 0;
296         int ret;
297         int i;
298
299         for (i = 0; i < hw->mc_addrs_num; i++) {
300                 addr = &hw->mc_addrs[i];
301                 if (!rte_is_multicast_ether_addr(addr))
302                         continue;
303                 if (del)
304                         ret = hns3vf_remove_mc_mac_addr(hns, addr);
305                 else
306                         ret = hns3vf_add_mc_mac_addr(hns, addr);
307                 if (ret) {
308                         err = ret;
309                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
310                                               addr);
311                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
312                                  del ? "Remove" : "Restore", mac_str, ret);
313                 }
314         }
315         return err;
316 }
317
318 static int
319 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
320 {
321         struct hns3_mbx_vf_to_pf_cmd *req;
322         struct hns3_cmd_desc desc;
323         int ret;
324
325         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
326
327         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
328         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
329         req->msg[1] = en_bc_pmc ? 1 : 0;
330
331         ret = hns3_cmd_send(hw, &desc, 1);
332         if (ret)
333                 hns3_err(hw, "Set promisc mode fail, status is %d", ret);
334
335         return ret;
336 }
337
/*
 * dev_configure ethdev op. Validates the requested configuration (hip08
 * requires nb_rx_queues == nb_tx_queues and a VF cannot fix link speed),
 * then applies RSS, MTU and VLAN settings, moving adapter_state from
 * CONFIGURING to CONFIGURED (or back to INITIALIZED on failure).
 */
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;
	struct rte_eth_rss_conf rss_conf;
	uint16_t mtu;
	int ret;

	/*
	 * Hardware does not support where the number of rx and tx queues is
	 * not equal in hip08.
	 */
	if (nb_rx_q != nb_tx_q) {
		hns3_err(hw,
			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
			 "Hardware does not support this configuration!",
			 nb_rx_q, nb_tx_q);
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		hns3_err(hw, "setting link speed/duplex not supported");
		return -EINVAL;
	}

	hw->adapter_state = HNS3_NIC_CONFIGURING;

	/* When RSS is not configured, redirect the packet queue 0 */
	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
		rss_conf = conf->rx_adv_conf.rss_conf;
		/* Fall back to the driver's stored RSS key if none supplied. */
		if (rss_conf.rss_key == NULL) {
			rss_conf.rss_key = rss_cfg->key;
			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
		}

		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
		if (ret)
			goto cfg_err;
	}

	/*
	 * If jumbo frames are enabled, MTU needs to be refreshed
	 * according to the maximum RX packet length.
	 */
	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
		 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
		 * can safely assign to "uint16_t" type variable.
		 */
		mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
		ret = hns3vf_dev_mtu_set(dev, mtu);
		if (ret)
			goto cfg_err;
		dev->data->mtu = mtu;
	}

	ret = hns3vf_dev_configure_vlan(dev);
	if (ret)
		goto cfg_err;

	hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

cfg_err:
	/* Roll the state machine back so configure can be retried. */
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	return ret;
}
411
412 static int
413 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
414 {
415         int ret;
416
417         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
418                                 sizeof(mtu), true, NULL, 0);
419         if (ret)
420                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
421
422         return ret;
423 }
424
/*
 * mtu_set ethdev op: program the new MTU through the PF mailbox and keep
 * the local jumbo-frame offload flag and max_rx_pkt_len in sync with it.
 * Rejected while the port is started or while a reset is in progress.
 */
static int
hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
	int ret;

	if (dev->data->dev_started) {
		hns3_err(hw, "Failed to set mtu, port %u must be stopped "
			 "before configuration", dev->data->port_id);
		return -EBUSY;
	}

	if (rte_atomic16_read(&hw->reset.resetting)) {
		hns3_err(hw, "Failed to set mtu during resetting");
		return -EIO;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3vf_config_mtu(hw, mtu);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}
	/* Frames beyond the standard Ethernet max need the jumbo offload. */
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
460
/*
 * dev_infos_get ethdev op: report static VF capabilities — queue counts
 * from the PF-provided tqps_num, offload capability masks, descriptor ring
 * limits and RSS table/key sizes.
 */
static int
hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	info->max_rx_queues = hw->tqps_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = hw->rx_buf_len;
	info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;

	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				 DEV_RX_OFFLOAD_UDP_CKSUM |
				 DEV_RX_OFFLOAD_TCP_CKSUM |
				 DEV_RX_OFFLOAD_SCTP_CKSUM |
				 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 DEV_RX_OFFLOAD_KEEP_CRC |
				 DEV_RX_OFFLOAD_SCATTER |
				 DEV_RX_OFFLOAD_VLAN_STRIP |
				 DEV_RX_OFFLOAD_QINQ_STRIP |
				 DEV_RX_OFFLOAD_VLAN_FILTER |
				 DEV_RX_OFFLOAD_JUMBO_FRAME);
	info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 DEV_TX_OFFLOAD_IPV4_CKSUM |
				 DEV_TX_OFFLOAD_TCP_CKSUM |
				 DEV_TX_OFFLOAD_UDP_CKSUM |
				 DEV_TX_OFFLOAD_SCTP_CKSUM |
				 DEV_TX_OFFLOAD_VLAN_INSERT |
				 DEV_TX_OFFLOAD_QINQ_INSERT |
				 DEV_TX_OFFLOAD_MULTI_SEGS |
				 info->tx_queue_offload_capa);

	/* Ring descriptor count limits, identical for RX and TX. */
	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->vmdq_queue_num = 0;

	info->reta_size = HNS3_RSS_IND_TBL_SIZE;
	info->hash_key_size = HNS3_RSS_KEY_SIZE;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	return 0;
}
519
/* Write back @regclr to acknowledge the handled vector 0 event causes. */
static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}
525
/* Mask the misc (vector 0) interrupt. */
static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
531
/* Unmask the misc (vector 0) interrupt. */
static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
537
/*
 * Decode the vector 0 interrupt cause. Reset events take priority over
 * mailbox events. On a reset event the function marks the VF reset pending,
 * disables further command submission and acknowledges the reset to
 * hardware via HNS3_VF_RST_ING. When @clearval is non-NULL it receives the
 * bits the caller should write back to clear the handled cause; when it is
 * NULL the cause cannot be cleared now and reset handling is deferred via
 * hns3_schedule_delayed_reset().
 */
static enum hns3vf_evt_cause
hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3vf_evt_cause ret;
	uint32_t cmdq_stat_reg;
	uint32_t rst_ing_reg;
	uint32_t val;

	/* Fetch the events from their corresponding regs */
	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);

	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
		/* Stop issuing commands while the reset is in progress. */
		rte_atomic16_set(&hw->reset.disable_cmd, 1);
		/* Tell hardware this VF has seen the reset request. */
		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
		if (clearval) {
			hw->reset.stats.global_cnt++;
			hns3_warn(hw, "Global reset detected, clear reset status");
		} else {
			hns3_schedule_delayed_reset(hns);
			hns3_warn(hw, "Global reset detected, don't clear reset status");
		}

		ret = HNS3VF_VECTOR0_EVENT_RST;
		goto out;
	}

	/* Check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		ret = HNS3VF_VECTOR0_EVENT_MBX;
		goto out;
	}

	/* No recognized event: nothing to clear. */
	val = 0;
	ret = HNS3VF_VECTOR0_EVENT_OTHER;
out:
	if (clearval)
		*clearval = val;
	return ret;
}
584
/*
 * Misc (vector 0) interrupt handler: mask the interrupt, decode and
 * dispatch the cause (reset scheduling or mailbox processing), acknowledge
 * the handled cause, then unmask.
 */
static void
hns3vf_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3vf_evt_cause event_cause;
	uint32_t clearval;

	/* Record the interrupt thread on first entry. */
	if (hw->irq_thread_id == 0)
		hw->irq_thread_id = pthread_self();

	/* Disable interrupt */
	hns3vf_disable_irq0(hw);

	/* Read out interrupt causes */
	event_cause = hns3vf_check_event_cause(hns, &clearval);

	switch (event_cause) {
	case HNS3VF_VECTOR0_EVENT_RST:
		hns3_schedule_reset(hns);
		break;
	case HNS3VF_VECTOR0_EVENT_MBX:
		hns3_dev_handle_mbx_msg(hw);
		break;
	default:
		break;
	}

	/* Clear interrupt causes */
	hns3vf_clear_event_cause(hw, clearval);

	/* Enable interrupt */
	hns3vf_enable_irq0(hw);
}
620
621 static int
622 hns3vf_check_tqp_info(struct hns3_hw *hw)
623 {
624         uint16_t tqps_num;
625
626         tqps_num = hw->tqps_num;
627         if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
628                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
629                                   "range: 1~%d",
630                              tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
631                 return -EINVAL;
632         }
633
634         if (hw->rx_buf_len == 0)
635                 hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
636         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
637
638         return 0;
639 }
640
641 static int
642 hns3vf_get_queue_info(struct hns3_hw *hw)
643 {
644 #define HNS3VF_TQPS_RSS_INFO_LEN        6
645         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
646         int ret;
647
648         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
649                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
650         if (ret) {
651                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
652                 return ret;
653         }
654
655         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
656         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
657         memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
658
659         return hns3vf_check_tqp_info(hw);
660 }
661
662 static int
663 hns3vf_get_queue_depth(struct hns3_hw *hw)
664 {
665 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
666         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
667         int ret;
668
669         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
670                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
671         if (ret) {
672                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
673                              ret);
674                 return ret;
675         }
676
677         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
678         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
679
680         return 0;
681 }
682
683 static int
684 hns3vf_get_tc_info(struct hns3_hw *hw)
685 {
686         uint8_t resp_msg;
687         int ret;
688
689         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
690                                 true, &resp_msg, sizeof(resp_msg));
691         if (ret) {
692                 hns3_err(hw, "VF request to get TC info from PF failed %d",
693                          ret);
694                 return ret;
695         }
696
697         hw->hw_tc_map = resp_msg;
698
699         return 0;
700 }
701
702 static int
703 hns3vf_get_configuration(struct hns3_hw *hw)
704 {
705         int ret;
706
707         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
708
709         /* Get queue configuration from PF */
710         ret = hns3vf_get_queue_info(hw);
711         if (ret)
712                 return ret;
713
714         /* Get queue depth info from PF */
715         ret = hns3vf_get_queue_depth(hw);
716         if (ret)
717                 return ret;
718
719         /* Get tc configuration from PF */
720         return hns3vf_get_tc_info(hw);
721 }
722
723 static void
724 hns3vf_set_tc_info(struct hns3_adapter *hns)
725 {
726         struct hns3_hw *hw = &hns->hw;
727         uint16_t nb_rx_q = hw->data->nb_rx_queues;
728         uint16_t new_tqps;
729         uint8_t i;
730
731         hw->num_tc = 0;
732         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
733                 if (hw->hw_tc_map & BIT(i))
734                         hw->num_tc++;
735
736         new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
737         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
738         hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;
739
740         hns3_tc_queue_mapping_cfg(hw);
741 }
742
743 static void
744 hns3vf_request_link_info(struct hns3_hw *hw)
745 {
746         uint8_t resp_msg;
747         int ret;
748
749         if (rte_atomic16_read(&hw->reset.resetting))
750                 return;
751         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
752                                 &resp_msg, sizeof(resp_msg));
753         if (ret)
754                 hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
755 }
756
/*
 * Send an HNS3_MBX_VLAN_FILTER request to the PF.
 * Message layout: byte 0 = is_kill (1 removes the filter, 0 adds it),
 * bytes 1-2 = vlan_id (host byte order), bytes 3-4 = ether proto
 * (network byte order via htons).
 */
static int
hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
{
#define HNS3VF_VLAN_MBX_MSG_LEN 5
	struct hns3_hw *hw = &hns->hw;
	uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
	uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
	uint8_t is_kill = on ? 0 : 1;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));

	return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
				 msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
				 0);
}
774
775 static int
776 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
777 {
778         struct hns3_adapter *hns = dev->data->dev_private;
779         struct hns3_hw *hw = &hns->hw;
780         int ret;
781
782         if (rte_atomic16_read(&hw->reset.resetting)) {
783                 hns3_err(hw,
784                          "vf set vlan id failed during resetting, vlan_id =%u",
785                          vlan_id);
786                 return -EIO;
787         }
788         rte_spinlock_lock(&hw->lock);
789         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
790         rte_spinlock_unlock(&hw->lock);
791         if (ret)
792                 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
793                          vlan_id, ret);
794
795         return ret;
796 }
797
798 static int
799 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
800 {
801         uint8_t msg_data;
802         int ret;
803
804         msg_data = enable ? 1 : 0;
805         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
806                                 &msg_data, sizeof(msg_data), false, NULL, 0);
807         if (ret)
808                 hns3_err(hw, "vf enable strip failed, ret =%d", ret);
809
810         return ret;
811 }
812
813 static int
814 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
815 {
816         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
817         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
818         unsigned int tmp_mask;
819
820         tmp_mask = (unsigned int)mask;
821         /* Vlan stripping setting */
822         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
823                 rte_spinlock_lock(&hw->lock);
824                 /* Enable or disable VLAN stripping */
825                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
826                         hns3vf_en_hw_strip_rxvtag(hw, true);
827                 else
828                         hns3vf_en_hw_strip_rxvtag(hw, false);
829                 rte_spinlock_unlock(&hw->lock);
830         }
831
832         return 0;
833 }
834
/*
 * Apply @on (add when non-zero, kill when zero) to every VLAN id recorded
 * in the ethdev software VLAN filter bitmap (vlan_filter_conf). Each ids[i]
 * is a 64-bit word whose bit b stands for vlan id 64*i + b; the inner loop
 * peels off one set bit per iteration. Stops on the first mailbox failure.
 */
static int
hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
{
	struct rte_vlan_filter_conf *vfc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vlan_id;
	uint64_t vbit;
	uint64_t ids;
	int ret = 0;
	uint32_t i;

	vfc = &hw->data->vlan_filter_conf;
	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
		if (vfc->ids[i] == 0)
			continue;
		ids = vfc->ids[i];
		while (ids) {
			/*
			 * 64 means the num bits of ids, one bit corresponds to
			 * one vlan id
			 */
			vlan_id = 64 * i;
			/* count trailing zeroes */
			vbit = ~ids & (ids - 1);
			/* clear least significant bit set */
			ids ^= (ids ^ (ids - 1)) ^ vbit;
			/* vbit holds a mask of the trailing zeros: its bit
			 * count is the offset of the bit just cleared.
			 */
			for (; vbit;) {
				vbit >>= 1;
				vlan_id++;
			}
			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
			if (ret) {
				hns3_err(hw,
					 "VF handle vlan table failed, ret =%d, on = %d",
					 ret, on);
				return ret;
			}
		}
	}

	return ret;
}
877
/* Remove every VLAN id present in the software filter table from hardware. */
static int
hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
{
	return hns3vf_handle_all_vlan_table(hns, 0);
}
883
884 static int
885 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
886 {
887         struct hns3_hw *hw = &hns->hw;
888         struct rte_eth_conf *dev_conf;
889         bool en;
890         int ret;
891
892         dev_conf = &hw->data->dev_conf;
893         en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
894                                                                    : false;
895         ret = hns3vf_en_hw_strip_rxvtag(hw, en);
896         if (ret)
897                 hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
898                          ret);
899         return ret;
900 }
901
902 static int
903 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
904 {
905         struct hns3_adapter *hns = dev->data->dev_private;
906         struct rte_eth_dev_data *data = dev->data;
907         struct hns3_hw *hw = &hns->hw;
908         int ret;
909
910         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
911             data->dev_conf.txmode.hw_vlan_reject_untagged ||
912             data->dev_conf.txmode.hw_vlan_insert_pvid) {
913                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
914                               "or hw_vlan_insert_pvid is not support!");
915         }
916
917         /* Apply vlan offload setting */
918         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
919         if (ret)
920                 hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);
921
922         return ret;
923 }
924
925 static int
926 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
927 {
928         uint8_t msg_data;
929
930         msg_data = alive ? 1 : 0;
931         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
932                                  sizeof(msg_data), false, NULL, 0);
933 }
934
/*
 * Alarm callback fired every HNS3VF_KEEP_ALIVE_INTERVAL us: sends a
 * keep-alive mailbox message so the PF knows this VF driver is still
 * running, then unconditionally re-arms itself.
 */
static void
hns3vf_keep_alive_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t respmsg;
	int ret;

	ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
				false, &respmsg, sizeof(uint8_t));
	if (ret)
		hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
			 ret);

	/* Re-arm even on failure; the alarm runs for the port's lifetime. */
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
}
953
954 static void
955 hns3vf_service_handler(void *param)
956 {
957         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
958         struct hns3_adapter *hns = eth_dev->data->dev_private;
959         struct hns3_hw *hw = &hns->hw;
960
961         /*
962          * The query link status and reset processing are executed in the
963          * interrupt thread.When the IMP reset occurs, IMP will not respond,
964          * and the query operation will time out after 30ms. In the case of
965          * multiple PF/VFs, each query failure timeout causes the IMP reset
966          * interrupt to fail to respond within 100ms.
967          * Before querying the link status, check whether there is a reset
968          * pending, and if so, abandon the query.
969          */
970         if (!hns3vf_is_reset_pending(hns))
971                 hns3vf_request_link_info(hw);
972         else
973                 hns3_warn(hw, "Cancel the query when reset is pending");
974
975         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
976                           eth_dev);
977 }
978
/*
 * Bring the VF data-path configuration to its initial state once the
 * command queue is up: promiscuous mode, MTU, default VLAN filter, GRO
 * and the "alive" notification to the PF. On failure of any step after
 * the first, the promiscuous setting is rolled back.
 */
static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t mtu = hw->data->mtu;
	int ret;

	ret = hns3vf_set_promisc_mode(hw, true);
	if (ret)
		return ret;

	ret = hns3vf_config_mtu(hw, mtu);
	if (ret)
		goto err_init_hardware;

	/* Enable filtering for VLAN id 0 by default */
	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
		goto err_init_hardware;
	}

	/* GRO starts disabled; it can be enabled later through offloads */
	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_init_hardware;
	}

	ret = hns3vf_set_alive(hw, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
		goto err_init_hardware;
	}

	hns3vf_request_link_info(hw);
	return 0;

err_init_hardware:
	(void)hns3vf_set_promisc_mode(hw, false);
	return ret;
}
1019
/*
 * Full VF initialization: map the BAR, bring up the firmware command
 * queue/interface, register and enable the misc interrupt, fetch the
 * configuration from the PF, generate a random MAC address and configure
 * the hardware. Unwinds every completed step on failure (goto ladder).
 */
static int
hns3vf_init_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	rte_spinlock_init(&hw->mbx_resp.lock);

	/* Clear any stale vector0 event before enabling the interrupt */
	hns3vf_clear_event_cause(hw, 0);

	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 hns3vf_interrupt_handler, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	/* Enable interrupt */
	rte_intr_enable(&pci_dev->intr_handle);
	hns3vf_enable_irq0(hw);

	/* Get configuration from PF */
	ret = hns3vf_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	rte_eth_random_addr(hw->mac.mac_addr); /* Generate a random mac addr */

	ret = hns3vf_init_hardware(hns);
	if (ret)
		goto err_get_config;

	hns3_set_default_rss_args(hw);

	/* Best effort; statistics reset failure is not fatal here */
	(void)hns3_stats_reset(eth_dev);
	return 0;

err_get_config:
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
	hns3_cmd_uninit(hw);

err_cmd_init:
	hns3_cmd_destroy_queue(hw);

err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
1096
/*
 * Tear down everything hns3vf_init_vf() set up, in reverse order:
 * RSS state, alive/promisc notifications to the PF, interrupts, and the
 * firmware command queue. Best-effort: mailbox failures are ignored.
 */
static void
hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_rss_uninit(hns);
	(void)hns3vf_set_alive(hw, false);
	(void)hns3vf_set_promisc_mode(hw, false);
	hns3vf_disable_irq0(hw);
	rte_intr_disable(&pci_dev->intr_handle);
	hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
			     eth_dev);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
1117
1118 static int
1119 hns3vf_do_stop(struct hns3_adapter *hns)
1120 {
1121         struct hns3_hw *hw = &hns->hw;
1122         bool reset_queue;
1123
1124         hw->mac.link_status = ETH_LINK_DOWN;
1125
1126         if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
1127                 hns3vf_configure_mac_addr(hns, true);
1128                 reset_queue = true;
1129         } else
1130                 reset_queue = false;
1131         return hns3_stop_queues(hns, reset_queue);
1132 }
1133
/*
 * dev_stop callback: switch to the dummy Rx/Tx burst functions and stop
 * the data path. When a reset is in progress the reset path owns the
 * teardown, so the stop work is skipped.
 */
static void
hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	/* Installs burst functions matching the new adapter state */
	hns3_set_rxtx_function(eth_dev);

	rte_spinlock_lock(&hw->lock);
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		hns3vf_do_stop(hns);
		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	rte_spinlock_unlock(&hw->lock);
}
1153
/*
 * dev_close callback: stop the port if needed, abort any in-flight reset,
 * cancel the periodic alarms, clean the MAC/VLAN tables, and release all
 * driver resources (queues, reset wait data, process-private storage).
 */
static void
hns3vf_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (hw->adapter_state == HNS3_NIC_STARTED)
		hns3vf_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	/* Make sure no reset service is still running before freeing */
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;
	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hns3vf_configure_all_mc_mac_addr(hns, true);
	hns3vf_remove_all_vlan_table(hns);
	hns3vf_uninit_vf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}
1177
1178 static int
1179 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1180                        __rte_unused int wait_to_complete)
1181 {
1182         struct hns3_adapter *hns = eth_dev->data->dev_private;
1183         struct hns3_hw *hw = &hns->hw;
1184         struct hns3_mac *mac = &hw->mac;
1185         struct rte_eth_link new_link;
1186
1187         hns3vf_request_link_info(hw);
1188
1189         memset(&new_link, 0, sizeof(new_link));
1190         switch (mac->link_speed) {
1191         case ETH_SPEED_NUM_10M:
1192         case ETH_SPEED_NUM_100M:
1193         case ETH_SPEED_NUM_1G:
1194         case ETH_SPEED_NUM_10G:
1195         case ETH_SPEED_NUM_25G:
1196         case ETH_SPEED_NUM_40G:
1197         case ETH_SPEED_NUM_50G:
1198         case ETH_SPEED_NUM_100G:
1199                 new_link.link_speed = mac->link_speed;
1200                 break;
1201         default:
1202                 new_link.link_speed = ETH_SPEED_NUM_100M;
1203                 break;
1204         }
1205
1206         new_link.link_duplex = mac->link_duplex;
1207         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
1208         new_link.link_autoneg =
1209             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
1210
1211         return rte_eth_linkstatus_set(eth_dev, &new_link);
1212 }
1213
1214 static int
1215 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1216 {
1217         struct hns3_hw *hw = &hns->hw;
1218         int ret;
1219
1220         hns3vf_set_tc_info(hns);
1221
1222         ret = hns3_start_queues(hns, reset_queue);
1223         if (ret) {
1224                 hns3_err(hw, "Failed to start queues: %d", ret);
1225                 return ret;
1226         }
1227
1228         return 0;
1229 }
1230
1231 static int
1232 hns3vf_dev_start(struct rte_eth_dev *eth_dev)
1233 {
1234         struct hns3_adapter *hns = eth_dev->data->dev_private;
1235         struct hns3_hw *hw = &hns->hw;
1236         int ret;
1237
1238         PMD_INIT_FUNC_TRACE();
1239         if (rte_atomic16_read(&hw->reset.resetting))
1240                 return -EBUSY;
1241         rte_spinlock_lock(&hw->lock);
1242         hw->adapter_state = HNS3_NIC_STARTING;
1243         ret = hns3vf_do_start(hns, true);
1244         if (ret) {
1245                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1246                 rte_spinlock_unlock(&hw->lock);
1247                 return ret;
1248         }
1249         hw->adapter_state = HNS3_NIC_STARTED;
1250         rte_spinlock_unlock(&hw->lock);
1251         hns3_set_rxtx_function(eth_dev);
1252         return 0;
1253 }
1254
1255 static bool
1256 is_vf_reset_done(struct hns3_hw *hw)
1257 {
1258 #define HNS3_FUN_RST_ING_BITS \
1259         (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1260          BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1261          BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1262          BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1263
1264         uint32_t val;
1265
1266         if (hw->reset.level == HNS3_VF_RESET) {
1267                 val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1268                 if (val & HNS3_VF_RST_ING_BIT)
1269                         return false;
1270         } else {
1271                 val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1272                 if (val & HNS3_FUN_RST_ING_BITS)
1273                         return false;
1274         }
1275         return true;
1276 }
1277
/*
 * Report whether a reset of higher priority than the one currently being
 * handled is pending. Also refreshes hw->reset.pending from the interrupt
 * cause registers as a side effect.
 */
bool
hns3vf_is_reset_pending(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset;

	/* Latch any new reset event into hw->reset.pending */
	hns3vf_check_event_cause(hns, NULL);
	reset = hns3vf_get_reset_level(hw, &hw->reset.pending);
	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is pending", reset);
		return true;
	}
	return false;
}
1292
1293 static int
1294 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1295 {
1296         struct hns3_hw *hw = &hns->hw;
1297         struct hns3_wait_data *wait_data = hw->reset.wait_data;
1298         struct timeval tv;
1299
1300         if (wait_data->result == HNS3_WAIT_SUCCESS)
1301                 return 0;
1302         else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1303                 gettimeofday(&tv, NULL);
1304                 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1305                           tv.tv_sec, tv.tv_usec);
1306                 return -ETIME;
1307         } else if (wait_data->result == HNS3_WAIT_REQUEST)
1308                 return -EAGAIN;
1309
1310         wait_data->hns = hns;
1311         wait_data->check_completion = is_vf_reset_done;
1312         wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1313                                       HNS3VF_RESET_WAIT_MS + get_timeofday_ms();
1314         wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1315         wait_data->count = HNS3VF_RESET_WAIT_CNT;
1316         wait_data->result = HNS3_WAIT_REQUEST;
1317         rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1318         return -EAGAIN;
1319 }
1320
1321 static int
1322 hns3vf_prepare_reset(struct hns3_adapter *hns)
1323 {
1324         struct hns3_hw *hw = &hns->hw;
1325         int ret = 0;
1326
1327         if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1328                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL,
1329                                         0, true, NULL, 0);
1330         }
1331         rte_atomic16_set(&hw->reset.disable_cmd, 1);
1332
1333         return ret;
1334 }
1335
/*
 * Reset callback: quiesce the VF before the reset proper. Cancels the
 * periodic service task, marks the link down, installs dummy Rx/Tx burst
 * functions and stops the data path. mbuf release is deferred to the
 * reset path only if the port was actually running.
 */
static int
hns3vf_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
	hw->mac.link_status = ETH_LINK_DOWN;

	hns3_set_rxtx_function(eth_dev);

	rte_spinlock_lock(&hw->lock);
	if (hw->adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3vf_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries.
	 */
	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
		hns3vf_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
1367
1368 static int
1369 hns3vf_start_service(struct hns3_adapter *hns)
1370 {
1371         struct hns3_hw *hw = &hns->hw;
1372         struct rte_eth_dev *eth_dev;
1373
1374         eth_dev = &rte_eth_devices[hw->data->port_id];
1375         hns3_set_rxtx_function(eth_dev);
1376
1377         hns3vf_service_handler(eth_dev);
1378         return 0;
1379 }
1380
/*
 * Reset callback: restore the pre-reset configuration — unicast and
 * multicast MAC tables, VLAN settings, and (if the port was started) the
 * data path itself. On failure, MAC table entries installed so far are
 * removed again (goto ladder).
 */
static int
hns3vf_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* del = false: re-add unicast MAC addresses to hardware */
	ret = hns3vf_configure_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3vf_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3vf_restore_vlan_conf(hns);
	if (ret)
		goto err_vlan_table;

	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/* Queues were already reset by the reset path: no re-reset */
		ret = hns3vf_do_start(hns, false);
		if (ret)
			goto err_vlan_table;
		hns3_info(hw, "hns3vf dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_vlan_table:
	hns3vf_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3vf_configure_mac_addr(hns, true);
	return ret;
}
1414
1415 static enum hns3_reset_level
1416 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
1417 {
1418         enum hns3_reset_level reset_level;
1419
1420         /* return the highest priority reset level amongst all */
1421         if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
1422                 reset_level = HNS3_VF_RESET;
1423         else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
1424                 reset_level = HNS3_VF_FULL_RESET;
1425         else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
1426                 reset_level = HNS3_VF_PF_FUNC_RESET;
1427         else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
1428                 reset_level = HNS3_VF_FUNC_RESET;
1429         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
1430                 reset_level = HNS3_FLR_RESET;
1431         else
1432                 reset_level = HNS3_NONE_RESET;
1433
1434         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
1435                 return HNS3_NONE_RESET;
1436
1437         return reset_level;
1438 }
1439
1440 static void
1441 hns3vf_reset_service(void *param)
1442 {
1443         struct hns3_adapter *hns = (struct hns3_adapter *)param;
1444         struct hns3_hw *hw = &hns->hw;
1445         enum hns3_reset_level reset_level;
1446         struct timeval tv_delta;
1447         struct timeval tv_start;
1448         struct timeval tv;
1449         uint64_t msec;
1450
1451         /*
1452          * The interrupt is not triggered within the delay time.
1453          * The interrupt may have been lost. It is necessary to handle
1454          * the interrupt to recover from the error.
1455          */
1456         if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
1457                 rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
1458                 hns3_err(hw, "Handling interrupts in delayed tasks");
1459                 hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
1460         }
1461         rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
1462
1463         /*
1464          * Hardware reset has been notified, we now have to poll & check if
1465          * hardware has actually completed the reset sequence.
1466          */
1467         reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
1468         if (reset_level != HNS3_NONE_RESET) {
1469                 gettimeofday(&tv_start, NULL);
1470                 hns3_reset_process(hns, reset_level);
1471                 gettimeofday(&tv, NULL);
1472                 timersub(&tv, &tv_start, &tv_delta);
1473                 msec = tv_delta.tv_sec * MSEC_PER_SEC +
1474                        tv_delta.tv_usec / USEC_PER_MSEC;
1475                 if (msec > HNS3_RESET_PROCESS_MS)
1476                         hns3_err(hw, "%d handle long time delta %" PRIx64
1477                                  " ms time=%ld.%.6ld",
1478                                  hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
1479         }
1480 }
1481
/*
 * Reset callback: re-initialize the device after hardware completed the
 * reset — bring the firmware command interface back first, then reset all
 * queues and redo the hardware configuration. On failure the command
 * interface is shut down again.
 */
static int
hns3vf_reinit_dev(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		hns3_err(hw, "Failed to init cmd: %d", ret);
		return ret;
	}

	ret = hns3_reset_all_queues(hns);
	if (ret) {
		hns3_err(hw, "Failed to reset all queues: %d", ret);
		goto err_init;
	}

	ret = hns3vf_init_hardware(hns);
	if (ret) {
		hns3_err(hw, "Failed to init hardware: %d", ret);
		goto err_init;
	}

	return 0;

err_init:
	hns3_cmd_uninit(hw);
	return ret;
}
1513
/* ethdev ops implemented by the hns3 VF driver (installed in dev_init). */
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_start          = hns3vf_dev_start,
	.dev_stop           = hns3vf_dev_stop,
	.dev_close          = hns3vf_dev_close,
	.mtu_set            = hns3vf_dev_mtu_set,
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get      = hns3vf_dev_infos_get,
	.rx_queue_setup     = hns3_rx_queue_setup,
	.tx_queue_setup     = hns3_tx_queue_setup,
	.rx_queue_release   = hns3_dev_rx_queue_release,
	.tx_queue_release   = hns3_dev_tx_queue_release,
	.dev_configure      = hns3vf_dev_configure,
	.mac_addr_add       = hns3vf_add_mac_addr,
	.mac_addr_remove    = hns3vf_remove_mac_addr,
	.mac_addr_set       = hns3vf_set_default_mac_addr,
	.set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
	.link_update        = hns3vf_dev_link_update,
	.rss_hash_update    = hns3_dev_rss_hash_update,
	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
	.reta_update        = hns3_dev_rss_reta_update,
	.reta_query         = hns3_dev_rss_reta_query,
	.filter_ctrl        = hns3_dev_filter_ctrl,
	.vlan_filter_set    = hns3vf_vlan_filter_set,
	.vlan_offload_set   = hns3vf_vlan_offload_set,
	.get_reg            = hns3_get_regs,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
1547
/*
 * Callbacks used by the common reset framework (hns3_intr.c) to drive the
 * VF through the reset state machine (installed via hw->reset.ops).
 */
static const struct hns3_reset_ops hns3vf_reset_ops = {
	.reset_service       = hns3vf_reset_service,
	.stop_service        = hns3vf_stop_service,
	.prepare_reset       = hns3vf_prepare_reset,
	.wait_hardware_ready = hns3vf_wait_hardware_ready,
	.reinit_dev          = hns3vf_reinit_dev,
	.restore_conf        = hns3vf_restore_conf,
	.start_service       = hns3vf_start_service,
};
1557
/*
 * ethdev init callback for the VF: allocates process-private storage,
 * installs ops and burst functions, and (primary process only) sets up
 * the reset framework, initializes the VF hardware, allocates the MAC
 * address table and starts the keep-alive and service alarms.
 */
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	/* Secondary processes only attach to the shared data */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3vf_reset_ops;

	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* A reset may have been requested while we were initializing */
	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;

	return ret;
}
1650
/*
 * ethdev uninit callback: detaches the burst functions and closes the
 * device if it was not already closing. Only the primary process may
 * perform the teardown.
 */
static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3vf_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}
1673
/* PCI probe entry: create one ethdev per matching VF device. */
static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3vf_dev_init);
}
1682
/* PCI remove entry: uninit and release the ethdev bound to this device. */
static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}
1688
/* PCI ids of the HiSilicon hns3 VF devices handled by this driver. */
static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};

/* Register the driver with the PCI bus and declare kernel module deps. */
RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");