net/hns3: support Rx/Tx and related operations
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <string.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <arpa/inet.h>
12 #include <rte_alarm.h>
13 #include <rte_atomic.h>
14 #include <rte_bus_pci.h>
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18 #include <rte_dev.h>
19 #include <rte_eal.h>
20 #include <rte_ether.h>
21 #include <rte_ethdev_driver.h>
22 #include <rte_ethdev_pci.h>
23 #include <rte_interrupts.h>
24 #include <rte_io.h>
25 #include <rte_log.h>
26 #include <rte_pci.h>
27
28 #include "hns3_ethdev.h"
29 #include "hns3_logs.h"
30 #include "hns3_rxtx.h"
31 #include "hns3_regs.h"
32 #include "hns3_dcb.h"
33
/* Periods for the self-rearming alarm handlers defined below. */
#define HNS3VF_KEEP_ALIVE_INTERVAL      2000000 /* us */
#define HNS3VF_SERVICE_INTERVAL         1000000 /* us */

/* Reset wait parameters; the consuming reset loop is outside this chunk. */
#define HNS3VF_RESET_WAIT_MS    20
#define HNS3VF_RESET_WAIT_CNT   2000
39
/* Event sources that the misc vector0 interrupt can report on a VF. */
enum hns3vf_evt_cause {
        HNS3VF_VECTOR0_EVENT_RST,   /* reset event (not raised in this chunk) */
        HNS3VF_VECTOR0_EVENT_MBX,   /* mailbox message pending in CMDQ RX */
        HNS3VF_VECTOR0_EVENT_OTHER, /* no recognized source */
};
45
/* Forward declarations for functions referenced before their definitions. */
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
48
49 static int
50 hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
51                     __attribute__ ((unused)) uint32_t idx,
52                     __attribute__ ((unused)) uint32_t pool)
53 {
54         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
55         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
56         int ret;
57
58         rte_spinlock_lock(&hw->lock);
59         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
60                                 HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes,
61                                 RTE_ETHER_ADDR_LEN, false, NULL, 0);
62         rte_spinlock_unlock(&hw->lock);
63         if (ret) {
64                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
65                                       mac_addr);
66                 hns3_err(hw, "Failed to add mac addr(%s) for vf: %d", mac_str,
67                          ret);
68         }
69
70         return ret;
71 }
72
73 static void
74 hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
75 {
76         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
77         /* index will be checked by upper level rte interface */
78         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
79         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
80         int ret;
81
82         rte_spinlock_lock(&hw->lock);
83         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
84                                 HNS3_MBX_MAC_VLAN_UC_REMOVE,
85                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
86                                 NULL, 0);
87         rte_spinlock_unlock(&hw->lock);
88         if (ret) {
89                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
90                                       mac_addr);
91                 hns3_err(hw, "Failed to remove mac addr(%s) for vf: %d",
92                          mac_str, ret);
93         }
94 }
95
96 static int
97 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
98                             struct rte_ether_addr *mac_addr)
99 {
100 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
101         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
102         struct rte_ether_addr *old_addr;
103         uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
104         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
105         int ret;
106
107         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
108                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
109                                       mac_addr);
110                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid.",
111                          mac_str);
112                 return -EINVAL;
113         }
114
115         old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
116         rte_spinlock_lock(&hw->lock);
117         memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
118         memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
119                RTE_ETHER_ADDR_LEN);
120
121         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST,
122                                 HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes,
123                                 HNS3_TWO_ETHER_ADDR_LEN, false, NULL, 0);
124         if (ret) {
125                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
126                                       mac_addr);
127                 hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str,
128                          ret);
129         }
130
131         rte_ether_addr_copy(mac_addr,
132                             (struct rte_ether_addr *)hw->mac.mac_addr);
133         rte_spinlock_unlock(&hw->lock);
134
135         return ret;
136 }
137
138 static int
139 hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del)
140 {
141         struct hns3_hw *hw = &hns->hw;
142         struct rte_ether_addr *addr;
143         enum hns3_mbx_mac_vlan_subcode opcode;
144         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
145         int ret = 0;
146         int i;
147
148         if (del)
149                 opcode = HNS3_MBX_MAC_VLAN_UC_REMOVE;
150         else
151                 opcode = HNS3_MBX_MAC_VLAN_UC_ADD;
152         for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) {
153                 addr = &hw->data->mac_addrs[i];
154                 if (!rte_is_valid_assigned_ether_addr(addr))
155                         continue;
156                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
157                 hns3_dbg(hw, "rm mac addr: %s", mac_str);
158                 ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, opcode,
159                                         addr->addr_bytes, RTE_ETHER_ADDR_LEN,
160                                         false, NULL, 0);
161                 if (ret) {
162                         hns3_err(hw, "Failed to remove mac addr for vf: %d",
163                                  ret);
164                         break;
165                 }
166         }
167         return ret;
168 }
169
170 static int
171 hns3vf_add_mc_mac_addr(struct hns3_adapter *hns,
172                        struct rte_ether_addr *mac_addr)
173 {
174         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
175         struct hns3_hw *hw = &hns->hw;
176         int ret;
177
178         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
179                                 HNS3_MBX_MAC_VLAN_MC_ADD,
180                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
181                                 NULL, 0);
182         if (ret) {
183                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
184                                       mac_addr);
185                 hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
186                          mac_str, ret);
187                 return ret;
188         }
189
190         return 0;
191 }
192
193 static int
194 hns3vf_remove_mc_mac_addr(struct hns3_adapter *hns,
195                           struct rte_ether_addr *mac_addr)
196 {
197         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
198         struct hns3_hw *hw = &hns->hw;
199         int ret;
200
201         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST,
202                                 HNS3_MBX_MAC_VLAN_MC_REMOVE,
203                                 mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false,
204                                 NULL, 0);
205         if (ret) {
206                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
207                                       mac_addr);
208                 hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
209                          mac_str, ret);
210                 return ret;
211         }
212
213         return 0;
214 }
215
/*
 * Replace the whole multicast address list: validate the new set, remove
 * every currently cached entry (newest first), then add the new entries,
 * mirroring each change into hw->mc_addrs / hw->mc_addrs_num.
 *
 * NOTE(review): on a mid-sequence mailbox failure the function returns with
 * the cache reflecting only the entries successfully applied so far; the
 * caller sees the error but the list is partially updated.
 *
 * Returns 0 on success, -EINVAL on an invalid request, or a mailbox error.
 */
static int
hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mc_addr_set,
                            uint32_t nb_mc_addr)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct rte_ether_addr *addr;
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int cur_addr_num;
        int set_addr_num;
        int num;
        int ret;
        int i;

        /* Reject lists larger than the device's multicast table. */
        if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
                hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
                         "invalid. valid range: 0~%d",
                         nb_mc_addr, HNS3_MC_MACADDR_NUM);
                return -EINVAL;
        }

        /* Validate all entries up front, before touching hardware. */
        set_addr_num = (int)nb_mc_addr;
        for (i = 0; i < set_addr_num; i++) {
                addr = &mc_addr_set[i];
                if (!rte_is_multicast_ether_addr(addr)) {
                        rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                              addr);
                        hns3_err(hw,
                                 "Failed to set mc mac addr, addr(%s) invalid.",
                                 mac_str);
                        return -EINVAL;
                }
        }
        rte_spinlock_lock(&hw->lock);
        /* Remove cached entries from the tail so mc_addrs stays compact. */
        cur_addr_num = hw->mc_addrs_num;
        for (i = 0; i < cur_addr_num; i++) {
                num = cur_addr_num - i - 1;
                addr = &hw->mc_addrs[num];
                ret = hns3vf_remove_mc_mac_addr(hns, addr);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        return ret;
                }

                hw->mc_addrs_num--;
        }

        /* Install the new list, caching each entry as it is accepted. */
        for (i = 0; i < set_addr_num; i++) {
                addr = &mc_addr_set[i];
                ret = hns3vf_add_mc_mac_addr(hns, addr);
                if (ret) {
                        rte_spinlock_unlock(&hw->lock);
                        return ret;
                }

                rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
                hw->mc_addrs_num++;
        }
        rte_spinlock_unlock(&hw->lock);

        return 0;
}
279
280 static int
281 hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
282 {
283         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
284         struct hns3_hw *hw = &hns->hw;
285         struct rte_ether_addr *addr;
286         int err = 0;
287         int ret;
288         int i;
289
290         for (i = 0; i < hw->mc_addrs_num; i++) {
291                 addr = &hw->mc_addrs[i];
292                 if (!rte_is_multicast_ether_addr(addr))
293                         continue;
294                 if (del)
295                         ret = hns3vf_remove_mc_mac_addr(hns, addr);
296                 else
297                         ret = hns3vf_add_mc_mac_addr(hns, addr);
298                 if (ret) {
299                         err = ret;
300                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
301                                               addr);
302                         hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d",
303                                  del ? "Remove" : "Restore", mac_str, ret);
304                 }
305         }
306         return err;
307 }
308
309 static int
310 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc)
311 {
312         struct hns3_mbx_vf_to_pf_cmd *req;
313         struct hns3_cmd_desc desc;
314         int ret;
315
316         req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
317
318         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
319         req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
320         req->msg[1] = en_bc_pmc ? 1 : 0;
321
322         ret = hns3_cmd_send(hw, &desc, 1);
323         if (ret)
324                 hns3_err(hw, "Set promisc mode fail, status is %d", ret);
325
326         return ret;
327 }
328
/*
 * rte_eth dev_configure callback: validate queue counts and link flags,
 * then apply RSS, MTU and VLAN settings in order, driving adapter_state
 * CONFIGURING -> CONFIGURED (back to INITIALIZED on any failure).
 */
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_rss_conf *rss_cfg = &hw->rss_info;
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
        uint16_t mtu;
        int ret;

        /*
         * Hardware does not support where the number of rx and tx queues is
         * not equal in hip08.
         */
        if (nb_rx_q != nb_tx_q) {
                hns3_err(hw,
                         "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
                         "Hardware does not support this configuration!",
                         nb_rx_q, nb_tx_q);
                return -EINVAL;
        }

        if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
                return -EINVAL;
        }

        hw->adapter_state = HNS3_NIC_CONFIGURING;

        /* When RSS is not configured, redirect the packet queue 0 */
        if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
                rss_conf = conf->rx_adv_conf.rss_conf;
                if (rss_conf.rss_key == NULL) {
                        /* No user-supplied key: use the driver's stored key. */
                        rss_conf.rss_key = rss_cfg->key;
                        rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
                }

                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
                if (ret)
                        goto cfg_err;
        }

        /*
         * If jumbo frames are enabled, MTU needs to be refreshed
         * according to the maximum RX packet length.
         */
        if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                /*
                 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
                 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
                 * can safely assign to "uint16_t" type variable.
                 */
                mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
                ret = hns3vf_dev_mtu_set(dev, mtu);
                if (ret)
                        goto cfg_err;
                dev->data->mtu = mtu;
        }

        ret = hns3vf_dev_configure_vlan(dev);
        if (ret)
                goto cfg_err;

        hw->adapter_state = HNS3_NIC_CONFIGURED;
        return 0;

cfg_err:
        /* Roll the state machine back so configure can be retried. */
        hw->adapter_state = HNS3_NIC_INITIALIZED;
        return ret;
}
402
403 static int
404 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
405 {
406         int ret;
407
408         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu,
409                                 sizeof(mtu), true, NULL, 0);
410         if (ret)
411                 hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
412
413         return ret;
414 }
415
416 static int
417 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
418 {
419         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
420         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
421         int ret;
422
423         if (dev->data->dev_started) {
424                 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
425                          "before configuration", dev->data->port_id);
426                 return -EBUSY;
427         }
428
429         rte_spinlock_lock(&hw->lock);
430         ret = hns3vf_config_mtu(hw, mtu);
431         if (ret) {
432                 rte_spinlock_unlock(&hw->lock);
433                 return ret;
434         }
435         if (frame_size > RTE_ETHER_MAX_LEN)
436                 dev->data->dev_conf.rxmode.offloads |=
437                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
438         else
439                 dev->data->dev_conf.rxmode.offloads &=
440                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
441         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
442         rte_spinlock_unlock(&hw->lock);
443
444         return 0;
445 }
446
447 static int
448 hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
449 {
450         struct hns3_adapter *hns = eth_dev->data->dev_private;
451         struct hns3_hw *hw = &hns->hw;
452
453         info->max_rx_queues = hw->tqps_num;
454         info->max_tx_queues = hw->tqps_num;
455         info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
456         info->min_rx_bufsize = hw->rx_buf_len;
457         info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
458         info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
459
460         info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
461                                  DEV_RX_OFFLOAD_UDP_CKSUM |
462                                  DEV_RX_OFFLOAD_TCP_CKSUM |
463                                  DEV_RX_OFFLOAD_SCTP_CKSUM |
464                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
465                                  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
466                                  DEV_RX_OFFLOAD_KEEP_CRC |
467                                  DEV_RX_OFFLOAD_SCATTER |
468                                  DEV_RX_OFFLOAD_VLAN_STRIP |
469                                  DEV_RX_OFFLOAD_QINQ_STRIP |
470                                  DEV_RX_OFFLOAD_VLAN_FILTER |
471                                  DEV_RX_OFFLOAD_JUMBO_FRAME);
472         info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
473         info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
474                                  DEV_TX_OFFLOAD_IPV4_CKSUM |
475                                  DEV_TX_OFFLOAD_TCP_CKSUM |
476                                  DEV_TX_OFFLOAD_UDP_CKSUM |
477                                  DEV_TX_OFFLOAD_SCTP_CKSUM |
478                                  DEV_TX_OFFLOAD_VLAN_INSERT |
479                                  DEV_TX_OFFLOAD_QINQ_INSERT |
480                                  DEV_TX_OFFLOAD_MULTI_SEGS |
481                                  info->tx_queue_offload_capa);
482
483         info->rx_desc_lim = (struct rte_eth_desc_lim) {
484                 .nb_max = HNS3_MAX_RING_DESC,
485                 .nb_min = HNS3_MIN_RING_DESC,
486                 .nb_align = HNS3_ALIGN_RING_DESC,
487         };
488
489         info->tx_desc_lim = (struct rte_eth_desc_lim) {
490                 .nb_max = HNS3_MAX_RING_DESC,
491                 .nb_min = HNS3_MIN_RING_DESC,
492                 .nb_align = HNS3_ALIGN_RING_DESC,
493         };
494
495         info->vmdq_queue_num = 0;
496
497         info->reta_size = HNS3_RSS_IND_TBL_SIZE;
498         info->hash_key_size = HNS3_RSS_KEY_SIZE;
499         info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
500         info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
501         info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
502
503         return 0;
504 }
505
/* Acknowledge handled vector0 events by writing their source bits back. */
static void
hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
{
        hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
}
511
/* Mask the misc vector0 interrupt (write 0 to its enable register). */
static void
hns3vf_disable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
517
/* Unmask the misc vector0 interrupt (write 1 to its enable register). */
static void
hns3vf_enable_irq0(struct hns3_hw *hw)
{
        hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
523
524 static enum hns3vf_evt_cause
525 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
526 {
527         struct hns3_hw *hw = &hns->hw;
528         enum hns3vf_evt_cause ret;
529         uint32_t cmdq_stat_reg;
530         uint32_t val;
531
532         /* Fetch the events from their corresponding regs */
533         cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
534
535         /* Check for vector0 mailbox(=CMDQ RX) event source */
536         if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
537                 val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
538                 ret = HNS3VF_VECTOR0_EVENT_MBX;
539                 goto out;
540         }
541
542         val = 0;
543         ret = HNS3VF_VECTOR0_EVENT_OTHER;
544 out:
545         if (clearval)
546                 *clearval = val;
547         return ret;
548 }
549
550 static void
551 hns3vf_interrupt_handler(void *param)
552 {
553         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
554         struct hns3_adapter *hns = dev->data->dev_private;
555         struct hns3_hw *hw = &hns->hw;
556         enum hns3vf_evt_cause event_cause;
557         uint32_t clearval;
558
559         /* Disable interrupt */
560         hns3vf_disable_irq0(hw);
561
562         /* Read out interrupt causes */
563         event_cause = hns3vf_check_event_cause(hns, &clearval);
564
565         switch (event_cause) {
566         case HNS3VF_VECTOR0_EVENT_MBX:
567                 hns3_dev_handle_mbx_msg(hw);
568                 break;
569         default:
570                 break;
571         }
572
573         /* Clear interrupt causes */
574         hns3vf_clear_event_cause(hw, clearval);
575
576         /* Enable interrupt */
577         hns3vf_enable_irq0(hw);
578 }
579
580 static int
581 hns3vf_check_tqp_info(struct hns3_hw *hw)
582 {
583         uint16_t tqps_num;
584
585         tqps_num = hw->tqps_num;
586         if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
587                 PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
588                                   "range: 1~%d",
589                              tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
590                 return -EINVAL;
591         }
592
593         if (hw->rx_buf_len == 0)
594                 hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN;
595         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
596
597         return 0;
598 }
599
600 static int
601 hns3vf_get_queue_info(struct hns3_hw *hw)
602 {
603 #define HNS3VF_TQPS_RSS_INFO_LEN        6
604         uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
605         int ret;
606
607         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true,
608                                 resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
609         if (ret) {
610                 PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
611                 return ret;
612         }
613
614         memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
615         memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
616         memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t));
617
618         return hns3vf_check_tqp_info(hw);
619 }
620
621 static int
622 hns3vf_get_queue_depth(struct hns3_hw *hw)
623 {
624 #define HNS3VF_TQPS_DEPTH_INFO_LEN      4
625         uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN];
626         int ret;
627
628         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true,
629                                 resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN);
630         if (ret) {
631                 PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d",
632                              ret);
633                 return ret;
634         }
635
636         memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t));
637         memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t));
638
639         return 0;
640 }
641
642 static int
643 hns3vf_get_tc_info(struct hns3_hw *hw)
644 {
645         uint8_t resp_msg;
646         int ret;
647
648         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
649                                 true, &resp_msg, sizeof(resp_msg));
650         if (ret) {
651                 hns3_err(hw, "VF request to get TC info from PF failed %d",
652                          ret);
653                 return ret;
654         }
655
656         hw->hw_tc_map = resp_msg;
657
658         return 0;
659 }
660
661 static int
662 hns3vf_get_configuration(struct hns3_hw *hw)
663 {
664         int ret;
665
666         hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
667
668         /* Get queue configuration from PF */
669         ret = hns3vf_get_queue_info(hw);
670         if (ret)
671                 return ret;
672
673         /* Get queue depth info from PF */
674         ret = hns3vf_get_queue_depth(hw);
675         if (ret)
676                 return ret;
677
678         /* Get tc configuration from PF */
679         return hns3vf_get_tc_info(hw);
680 }
681
682 static void
683 hns3vf_set_tc_info(struct hns3_adapter *hns)
684 {
685         struct hns3_hw *hw = &hns->hw;
686         uint16_t nb_rx_q = hw->data->nb_rx_queues;
687         uint16_t new_tqps;
688         uint8_t i;
689
690         hw->num_tc = 0;
691         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
692                 if (hw->hw_tc_map & BIT(i))
693                         hw->num_tc++;
694
695         new_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);
696         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);
697         hw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;
698
699         hns3_tc_queue_mapping_cfg(hw);
700 }
701
702 static void
703 hns3vf_request_link_info(struct hns3_hw *hw)
704 {
705         uint8_t resp_msg;
706         int ret;
707
708         if (rte_atomic16_read(&hw->reset.resetting))
709                 return;
710         ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
711                                 &resp_msg, sizeof(resp_msg));
712         if (ret)
713                 hns3_err(hw, "Failed to fetch link status from PF: %d", ret);
714 }
715
716 static int
717 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
718 {
719 #define HNS3VF_VLAN_MBX_MSG_LEN 5
720         struct hns3_hw *hw = &hns->hw;
721         uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN];
722         uint16_t proto = htons(RTE_ETHER_TYPE_VLAN);
723         uint8_t is_kill = on ? 0 : 1;
724
725         msg_data[0] = is_kill;
726         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
727         memcpy(&msg_data[3], &proto, sizeof(proto));
728
729         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER,
730                                  msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL,
731                                  0);
732 }
733
734 static int
735 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
736 {
737         struct hns3_adapter *hns = dev->data->dev_private;
738         struct hns3_hw *hw = &hns->hw;
739         int ret;
740
741         rte_spinlock_lock(&hw->lock);
742         ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
743         rte_spinlock_unlock(&hw->lock);
744         if (ret)
745                 hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
746                          vlan_id, ret);
747
748         return ret;
749 }
750
751 static int
752 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
753 {
754         uint8_t msg_data;
755         int ret;
756
757         msg_data = enable ? 1 : 0;
758         ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG,
759                                 &msg_data, sizeof(msg_data), false, NULL, 0);
760         if (ret)
761                 hns3_err(hw, "vf enable strip failed, ret =%d", ret);
762
763         return ret;
764 }
765
766 static int
767 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
768 {
769         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
770         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
771         unsigned int tmp_mask;
772
773         tmp_mask = (unsigned int)mask;
774         /* Vlan stripping setting */
775         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
776                 rte_spinlock_lock(&hw->lock);
777                 /* Enable or disable VLAN stripping */
778                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
779                         hns3vf_en_hw_strip_rxvtag(hw, true);
780                 else
781                         hns3vf_en_hw_strip_rxvtag(hw, false);
782                 rte_spinlock_unlock(&hw->lock);
783         }
784
785         return 0;
786 }
787
788 static int
789 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
790 {
791         struct hns3_adapter *hns = dev->data->dev_private;
792         struct rte_eth_dev_data *data = dev->data;
793         struct hns3_hw *hw = &hns->hw;
794         int ret;
795
796         if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
797             data->dev_conf.txmode.hw_vlan_reject_untagged ||
798             data->dev_conf.txmode.hw_vlan_insert_pvid) {
799                 hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
800                               "or hw_vlan_insert_pvid is not support!");
801         }
802
803         /* Apply vlan offload setting */
804         ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
805         if (ret)
806                 hns3_err(hw, "dev config vlan offload failed, ret =%d", ret);
807
808         return ret;
809 }
810
811 static int
812 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
813 {
814         uint8_t msg_data;
815
816         msg_data = alive ? 1 : 0;
817         return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data,
818                                  sizeof(msg_data), false, NULL, 0);
819 }
820
821 static void
822 hns3vf_keep_alive_handler(void *param)
823 {
824         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
825         struct hns3_adapter *hns = eth_dev->data->dev_private;
826         struct hns3_hw *hw = &hns->hw;
827         uint8_t respmsg;
828         int ret;
829
830         ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0,
831                                 false, &respmsg, sizeof(uint8_t));
832         if (ret)
833                 hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
834                          ret);
835
836         rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
837                           eth_dev);
838 }
839
840 static void
841 hns3vf_service_handler(void *param)
842 {
843         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
844         struct hns3_adapter *hns = eth_dev->data->dev_private;
845         struct hns3_hw *hw = &hns->hw;
846
847         hns3vf_request_link_info(hw);
848
849         rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
850                           eth_dev);
851 }
852
/*
 * Program the VF's initial hardware state through the PF: broadcast
 * promiscuous mode, MTU, a default VLAN-0 filter, GRO off, and the alive
 * notification, finishing with an asynchronous link-status request.
 * On any failure after promisc was enabled, promisc is rolled back.
 */
static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
        struct hns3_hw *hw = &hns->hw;
        uint16_t mtu = hw->data->mtu;
        int ret;

        ret = hns3vf_set_promisc_mode(hw, true);
        if (ret)
                return ret;

        ret = hns3vf_config_mtu(hw, mtu);
        if (ret)
                goto err_init_hardware;

        /* Install the default VLAN-0 filter entry. */
        ret = hns3vf_vlan_filter_configure(hns, 0, 1);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
                goto err_init_hardware;
        }

        ret = hns3_config_gro(hw, false);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
                goto err_init_hardware;
        }

        ret = hns3vf_set_alive(hw, true);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
                goto err_init_hardware;
        }

        hns3vf_request_link_info(hw);
        return 0;

err_init_hardware:
        /* Undo promisc enable; result intentionally ignored on this path. */
        (void)hns3vf_set_promisc_mode(hw, false);
        return ret;
}
893
894 static int
895 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
896 {
897         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
898         struct hns3_adapter *hns = eth_dev->data->dev_private;
899         struct hns3_hw *hw = &hns->hw;
900         int ret;
901
902         PMD_INIT_FUNC_TRACE();
903
904         /* Get hardware io base address from pcie BAR2 IO space */
905         hw->io_base = pci_dev->mem_resource[2].addr;
906
907         /* Firmware command queue initialize */
908         ret = hns3_cmd_init_queue(hw);
909         if (ret) {
910                 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
911                 goto err_cmd_init_queue;
912         }
913
914         /* Firmware command initialize */
915         ret = hns3_cmd_init(hw);
916         if (ret) {
917                 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
918                 goto err_cmd_init;
919         }
920
921         rte_spinlock_init(&hw->mbx_resp.lock);
922
923         hns3vf_clear_event_cause(hw, 0);
924
925         ret = rte_intr_callback_register(&pci_dev->intr_handle,
926                                          hns3vf_interrupt_handler, eth_dev);
927         if (ret) {
928                 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
929                 goto err_intr_callback_register;
930         }
931
932         /* Enable interrupt */
933         rte_intr_enable(&pci_dev->intr_handle);
934         hns3vf_enable_irq0(hw);
935
936         /* Get configuration from PF */
937         ret = hns3vf_get_configuration(hw);
938         if (ret) {
939                 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
940                 goto err_get_config;
941         }
942
943         rte_eth_random_addr(hw->mac.mac_addr); /* Generate a random mac addr */
944
945         ret = hns3vf_init_hardware(hns);
946         if (ret)
947                 goto err_get_config;
948
949         hns3_set_default_rss_args(hw);
950
951         return 0;
952
953 err_get_config:
954         hns3vf_disable_irq0(hw);
955         rte_intr_disable(&pci_dev->intr_handle);
956
957 err_intr_callback_register:
958         hns3_cmd_uninit(hw);
959
960 err_cmd_init:
961         hns3_cmd_destroy_queue(hw);
962
963 err_cmd_init_queue:
964         hw->io_base = NULL;
965
966         return ret;
967 }
968
969 static void
970 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
971 {
972         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
973         struct hns3_adapter *hns = eth_dev->data->dev_private;
974         struct hns3_hw *hw = &hns->hw;
975
976         PMD_INIT_FUNC_TRACE();
977
978         hns3_rss_uninit(hns);
979         (void)hns3vf_set_alive(hw, false);
980         (void)hns3vf_set_promisc_mode(hw, false);
981         hns3vf_disable_irq0(hw);
982         rte_intr_disable(&pci_dev->intr_handle);
983         hns3_cmd_uninit(hw);
984         hns3_cmd_destroy_queue(hw);
985         hw->io_base = NULL;
986 }
987
/*
 * Data-path quiesce helper for hns3vf_dev_stop(): mark the link down and
 * update the hardware MAC address tables.
 *
 * NOTE(review): the boolean passed to hns3vf_configure_mac_addr() is
 * presumably a "remove/del" flag — confirm against its definition.
 *
 * Always returns 0; the caller ignores the return value.
 */
static int
hns3vf_do_stop(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;

	hw->mac.link_status = ETH_LINK_DOWN;

	hns3vf_configure_mac_addr(hns, true);

	return 0;
}
999
/*
 * ethdev .dev_stop callback: quiesce the data path and release mbufs.
 *
 * The adapter state is switched to STOPPING *before* re-selecting the
 * rx/tx burst functions so new bursts observe the stopping state
 * (presumably hns3_set_rxtx_function() installs dummy handlers in this
 * state — confirm in hns3_rxtx.c).
 */
static void
hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	hns3_set_rxtx_function(eth_dev);

	/* The hw lock serializes stop against other control-path ops. */
	rte_spinlock_lock(&hw->lock);
	hns3vf_do_stop(hns);
	hns3_dev_release_mbufs(hns);
	hw->adapter_state = HNS3_NIC_CONFIGURED;
	rte_spinlock_unlock(&hw->lock);
}
1017
1018 static void
1019 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1020 {
1021         struct hns3_adapter *hns = eth_dev->data->dev_private;
1022         struct hns3_hw *hw = &hns->hw;
1023
1024         if (hw->adapter_state == HNS3_NIC_STARTED)
1025                 hns3vf_dev_stop(eth_dev);
1026
1027         hw->adapter_state = HNS3_NIC_CLOSING;
1028         rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1029         rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
1030         hns3vf_configure_all_mc_mac_addr(hns, true);
1031         hns3vf_uninit_vf(eth_dev);
1032         hns3_free_all_queues(eth_dev);
1033         rte_free(eth_dev->process_private);
1034         eth_dev->process_private = NULL;
1035         hw->adapter_state = HNS3_NIC_CLOSED;
1036         hns3_warn(hw, "Close port %d finished", hw->data->port_id);
1037 }
1038
1039 static int
1040 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1041                        __rte_unused int wait_to_complete)
1042 {
1043         struct hns3_adapter *hns = eth_dev->data->dev_private;
1044         struct hns3_hw *hw = &hns->hw;
1045         struct hns3_mac *mac = &hw->mac;
1046         struct rte_eth_link new_link;
1047
1048         hns3vf_request_link_info(hw);
1049
1050         memset(&new_link, 0, sizeof(new_link));
1051         switch (mac->link_speed) {
1052         case ETH_SPEED_NUM_10M:
1053         case ETH_SPEED_NUM_100M:
1054         case ETH_SPEED_NUM_1G:
1055         case ETH_SPEED_NUM_10G:
1056         case ETH_SPEED_NUM_25G:
1057         case ETH_SPEED_NUM_40G:
1058         case ETH_SPEED_NUM_50G:
1059         case ETH_SPEED_NUM_100G:
1060                 new_link.link_speed = mac->link_speed;
1061                 break;
1062         default:
1063                 new_link.link_speed = ETH_SPEED_NUM_100M;
1064                 break;
1065         }
1066
1067         new_link.link_duplex = mac->link_duplex;
1068         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
1069         new_link.link_autoneg =
1070             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
1071
1072         return rte_eth_linkstatus_set(eth_dev, &new_link);
1073 }
1074
1075 static int
1076 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1077 {
1078         struct hns3_hw *hw = &hns->hw;
1079         int ret;
1080
1081         hns3vf_set_tc_info(hns);
1082
1083         ret = hns3_start_queues(hns, reset_queue);
1084         if (ret) {
1085                 hns3_err(hw, "Failed to start queues: %d", ret);
1086                 return ret;
1087         }
1088
1089         return 0;
1090 }
1091
1092 static int
1093 hns3vf_dev_start(struct rte_eth_dev *eth_dev)
1094 {
1095         struct hns3_adapter *hns = eth_dev->data->dev_private;
1096         struct hns3_hw *hw = &hns->hw;
1097         int ret;
1098
1099         PMD_INIT_FUNC_TRACE();
1100         rte_spinlock_lock(&hw->lock);
1101         hw->adapter_state = HNS3_NIC_STARTING;
1102         ret = hns3vf_do_start(hns, true);
1103         if (ret) {
1104                 hw->adapter_state = HNS3_NIC_CONFIGURED;
1105                 rte_spinlock_unlock(&hw->lock);
1106                 return ret;
1107         }
1108         hw->adapter_state = HNS3_NIC_STARTED;
1109         rte_spinlock_unlock(&hw->lock);
1110         hns3_set_rxtx_function(eth_dev);
1111         return 0;
1112 }
1113
/*
 * VF ethdev operations table. Shared rx/tx, RSS and filter ops come from
 * the common hns3 code; device lifecycle, MAC and VLAN ops use the
 * VF-specific (mailbox-based) implementations above.
 */
static const struct eth_dev_ops hns3vf_eth_dev_ops = {
	.dev_start          = hns3vf_dev_start,
	.dev_stop           = hns3vf_dev_stop,
	.dev_close          = hns3vf_dev_close,
	.mtu_set            = hns3vf_dev_mtu_set,
	.dev_infos_get      = hns3vf_dev_infos_get,
	.rx_queue_setup     = hns3_rx_queue_setup,
	.tx_queue_setup     = hns3_tx_queue_setup,
	.rx_queue_release   = hns3_dev_rx_queue_release,
	.tx_queue_release   = hns3_dev_tx_queue_release,
	.dev_configure      = hns3vf_dev_configure,
	.mac_addr_add       = hns3vf_add_mac_addr,
	.mac_addr_remove    = hns3vf_remove_mac_addr,
	.mac_addr_set       = hns3vf_set_default_mac_addr,
	.set_mc_addr_list   = hns3vf_set_mc_mac_addr_list,
	.link_update        = hns3vf_dev_link_update,
	.rss_hash_update    = hns3_dev_rss_hash_update,
	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
	.reta_update        = hns3_dev_rss_reta_update,
	.reta_query         = hns3_dev_rss_reta_query,
	.filter_ctrl        = hns3_dev_filter_ctrl,
	.vlan_filter_set    = hns3vf_vlan_filter_set,
	.vlan_offload_set   = hns3vf_vlan_offload_set,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};
1139
/*
 * ethdev init callback (invoked from the PCI probe path).
 *
 * Both primary and secondary processes allocate per-process private data,
 * initialize the flow-filter lists and install the ops/burst functions;
 * only the primary process performs the actual hardware initialization,
 * MAC address allocation and alarm setup.
 *
 * Returns 0 on success, -ENOMEM or the hns3vf_init_vf() error otherwise.
 */
static int
hns3vf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Per-process data (filter lists), allocated on the device's node. */
	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}

	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
	/* Secondary processes share the primary's hardware state; done. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = true;
	hw->data = eth_dev->data;

	ret = hns3vf_init_vf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
		goto err_init_vf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_VF_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_VF_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	/* Publish the random MAC generated in hns3vf_init_vf() as default. */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);
	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Start the PF keep-alive and link-info service alarms. */
	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
			  eth_dev);
	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
			  eth_dev);
	return 0;

err_rte_zmalloc:
	hns3vf_uninit_vf(eth_dev);

err_init_vf:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;

	return ret;
}
1217
/*
 * ethdev uninit callback (invoked from the PCI remove path).
 *
 * Only the primary process may tear the device down (-EPERM otherwise).
 * The ops and burst functions are cleared first, then the device is
 * closed if it has not reached the CLOSING state yet.
 */
static int
hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3vf_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}
1240
/* PCI probe entry: create the ethdev with a hns3_adapter private area. */
static int
eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3vf_dev_init);
}
1249
/* PCI remove entry: destroy the ethdev via hns3vf_dev_uninit(). */
static int
eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
}
1255
/* PCI IDs of the supported HiSilicon VF devices. */
static const struct rte_pci_id pci_id_hns3vf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* PMD driver definition; BAR mapping is required for register access. */
static struct rte_pci_driver rte_hns3vf_pmd = {
	.id_table = pci_id_hns3vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3vf_pci_probe,
	.remove = eth_hns3vf_pci_remove,
};

/* Register the driver, its ID table and kernel-module dependencies. */
RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");