722660d0cc5a513cde124630456900b858858d6a
[dpdk.git] / drivers / net / hns3 / hns3_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <rte_alarm.h>
6 #include <rte_bus_pci.h>
7 #include <ethdev_pci.h>
8 #include <rte_pci.h>
9
10 #include "hns3_ethdev.h"
11 #include "hns3_common.h"
12 #include "hns3_logs.h"
13 #include "hns3_rxtx.h"
14 #include "hns3_intr.h"
15 #include "hns3_regs.h"
16 #include "hns3_dcb.h"
17 #include "hns3_mp.h"
18 #include "hns3_flow.h"
19
20 #define HNS3_SERVICE_INTERVAL           1000000 /* us */
21 #define HNS3_SERVICE_QUICK_INTERVAL     10
22 #define HNS3_INVALID_PVID               0xFFFF
23
24 #define HNS3_FILTER_TYPE_VF             0
25 #define HNS3_FILTER_TYPE_PORT           1
26 #define HNS3_FILTER_FE_EGRESS_V1_B      BIT(0)
27 #define HNS3_FILTER_FE_NIC_INGRESS_B    BIT(0)
28 #define HNS3_FILTER_FE_NIC_EGRESS_B     BIT(1)
29 #define HNS3_FILTER_FE_ROCE_INGRESS_B   BIT(2)
30 #define HNS3_FILTER_FE_ROCE_EGRESS_B    BIT(3)
31 #define HNS3_FILTER_FE_EGRESS           (HNS3_FILTER_FE_NIC_EGRESS_B \
32                                         | HNS3_FILTER_FE_ROCE_EGRESS_B)
33 #define HNS3_FILTER_FE_INGRESS          (HNS3_FILTER_FE_NIC_INGRESS_B \
34                                         | HNS3_FILTER_FE_ROCE_INGRESS_B)
35
36 /* Reset related Registers */
37 #define HNS3_GLOBAL_RESET_BIT           0
38 #define HNS3_CORE_RESET_BIT             1
39 #define HNS3_IMP_RESET_BIT              2
40 #define HNS3_FUN_RST_ING_B              0
41
42 #define HNS3_VECTOR0_IMP_RESET_INT_B    1
43 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B     4U
44 #define HNS3_VECTOR0_IMP_RD_POISON_B    5U
45 #define HNS3_VECTOR0_ALL_MSIX_ERR_B     6U
46
47 #define HNS3_RESET_WAIT_MS      100
48 #define HNS3_RESET_WAIT_CNT     200
49
50 /* FEC mode order defined in HNS3 hardware */
51 #define HNS3_HW_FEC_MODE_NOFEC  0
52 #define HNS3_HW_FEC_MODE_BASER  1
53 #define HNS3_HW_FEC_MODE_RS     2
54
/* Event sources multiplexed onto the misc (vector 0) interrupt. */
enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,   /* IMP/global/core reset request */
	HNS3_VECTOR0_EVENT_MBX,   /* mailbox, i.e. CMDQ RX event */
	HNS3_VECTOR0_EVENT_ERR,   /* MSI-X or RAS hardware error */
	HNS3_VECTOR0_EVENT_PTP,   /* 1588 (PTP) event */
	HNS3_VECTOR0_EVENT_OTHER, /* none of the above; logged as unknown */
};
62
/*
 * Per-speed FEC capability table reported to applications: which FEC modes
 * (NOFEC/AUTO/BASER/RS) each link speed supports on this hardware.
 * Note RS is only available at 25G and above; 100G/200G drop BASER.
 */
static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },

	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
			     RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },

	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
};
90
91 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
92                                                  uint64_t *levels);
93 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
94 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
95                                     int on);
96 static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
97 static bool hns3_update_link_status(struct hns3_hw *hw);
98
99 static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
100                                 struct rte_ether_addr *mac_addr);
101 static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
102                                    struct rte_ether_addr *mac_addr);
103 static int hns3_restore_fec(struct hns3_hw *hw);
104 static int hns3_query_dev_fec_info(struct hns3_hw *hw);
105 static int hns3_do_stop(struct hns3_adapter *hns);
106 static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
107 static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
108
109
/* Mask the misc (vector 0) interrupt by writing 0 to its enable register. */
static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
}
115
/* Unmask the misc (vector 0) interrupt by writing 1 to its enable register. */
static void
hns3_pf_enable_irq0(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
}
121
122 static enum hns3_evt_cause
123 hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
124                           uint32_t *vec_val)
125 {
126         struct hns3_hw *hw = &hns->hw;
127
128         __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
129         hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
130         *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
131         if (!is_delay) {
132                 hw->reset.stats.imp_cnt++;
133                 hns3_warn(hw, "IMP reset detected, clear reset status");
134         } else {
135                 hns3_schedule_delayed_reset(hns);
136                 hns3_warn(hw, "IMP reset detected, don't clear reset status");
137         }
138
139         return HNS3_VECTOR0_EVENT_RST;
140 }
141
142 static enum hns3_evt_cause
143 hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
144                              uint32_t *vec_val)
145 {
146         struct hns3_hw *hw = &hns->hw;
147
148         __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
149         hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
150         *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
151         if (!is_delay) {
152                 hw->reset.stats.global_cnt++;
153                 hns3_warn(hw, "Global reset detected, clear reset status");
154         } else {
155                 hns3_schedule_delayed_reset(hns);
156                 hns3_warn(hw,
157                           "Global reset detected, don't clear reset status");
158         }
159
160         return HNS3_VECTOR0_EVENT_RST;
161 }
162
163 static enum hns3_evt_cause
164 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
165 {
166         struct hns3_hw *hw = &hns->hw;
167         uint32_t vector0_int_stats;
168         uint32_t cmdq_src_val;
169         uint32_t hw_err_src_reg;
170         uint32_t val;
171         enum hns3_evt_cause ret;
172         bool is_delay;
173
174         /* fetch the events from their corresponding regs */
175         vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
176         cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
177         hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
178
179         is_delay = clearval == NULL ? true : false;
180         /*
181          * Assumption: If by any chance reset and mailbox events are reported
182          * together then we will only process reset event and defer the
183          * processing of the mailbox events. Since, we would have not cleared
184          * RX CMDQ event this time we would receive again another interrupt
185          * from H/W just for the mailbox.
186          */
187         if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
188                 ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
189                 goto out;
190         }
191
192         /* Global reset */
193         if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
194                 ret = hns3_proc_global_reset_event(hns, is_delay, &val);
195                 goto out;
196         }
197
198         /* Check for vector0 1588 event source */
199         if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
200                 val = BIT(HNS3_VECTOR0_1588_INT_B);
201                 ret = HNS3_VECTOR0_EVENT_PTP;
202                 goto out;
203         }
204
205         /* check for vector0 msix event source */
206         if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
207             hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
208                 val = vector0_int_stats | hw_err_src_reg;
209                 ret = HNS3_VECTOR0_EVENT_ERR;
210                 goto out;
211         }
212
213         /* check for vector0 mailbox(=CMDQ RX) event source */
214         if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
215                 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
216                 val = cmdq_src_val;
217                 ret = HNS3_VECTOR0_EVENT_MBX;
218                 goto out;
219         }
220
221         val = vector0_int_stats;
222         ret = HNS3_VECTOR0_EVENT_OTHER;
223 out:
224
225         if (clearval)
226                 *clearval = val;
227         return ret;
228 }
229
230 static void
231 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
232 {
233         if (event_type == HNS3_VECTOR0_EVENT_RST ||
234             event_type == HNS3_VECTOR0_EVENT_PTP)
235                 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
236         else if (event_type == HNS3_VECTOR0_EVENT_MBX)
237                 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
238 }
239
240 static void
241 hns3_clear_all_event_cause(struct hns3_hw *hw)
242 {
243         uint32_t vector0_int_stats;
244
245         vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
246         if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
247                 hns3_warn(hw, "Probe during IMP reset interrupt");
248
249         if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
250                 hns3_warn(hw, "Probe during Global reset interrupt");
251
252         hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
253                                BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
254                                BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
255                                BIT(HNS3_VECTOR0_CORERESET_INT_B));
256         hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
257         hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
258                                 BIT(HNS3_VECTOR0_1588_INT_B));
259 }
260
261 static void
262 hns3_handle_mac_tnl(struct hns3_hw *hw)
263 {
264         struct hns3_cmd_desc desc;
265         uint32_t status;
266         int ret;
267
268         /* query and clear mac tnl interrupt */
269         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
270         ret = hns3_cmd_send(hw, &desc, 1);
271         if (ret) {
272                 hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
273                 return;
274         }
275
276         status = rte_le_to_cpu_32(desc.data[0]);
277         if (status) {
278                 hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
279                 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
280                                           false);
281                 desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
282                 ret = hns3_cmd_send(hw, &desc, 1);
283                 if (ret)
284                         hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
285                                  ret);
286         }
287 }
288
289 static void
290 hns3_interrupt_handler(void *param)
291 {
292         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
293         struct hns3_adapter *hns = dev->data->dev_private;
294         struct hns3_hw *hw = &hns->hw;
295         enum hns3_evt_cause event_cause;
296         uint32_t clearval = 0;
297         uint32_t vector0_int;
298         uint32_t ras_int;
299         uint32_t cmdq_int;
300
301         /* Disable interrupt */
302         hns3_pf_disable_irq0(hw);
303
304         event_cause = hns3_check_event_cause(hns, &clearval);
305         vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
306         ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
307         cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
308         hns3_clear_event_cause(hw, event_cause, clearval);
309         /* vector 0 interrupt is shared with reset and mailbox source events. */
310         if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
311                 hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
312                           "ras_int_stat:0x%x cmdq_int_stat:0x%x",
313                           vector0_int, ras_int, cmdq_int);
314                 hns3_handle_mac_tnl(hw);
315                 hns3_handle_error(hns);
316         } else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
317                 hns3_warn(hw, "received reset interrupt");
318                 hns3_schedule_reset(hns);
319         } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
320                 hns3_dev_handle_mbx_msg(hw);
321         } else {
322                 hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
323                           "ras_int_stat:0x%x cmdq_int_stat:0x%x",
324                           vector0_int, ras_int, cmdq_int);
325         }
326
327         /* Enable interrupt if it is not cause by reset */
328         hns3_pf_enable_irq0(hw);
329 }
330
331 static int
332 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
333 {
334 #define HNS3_VLAN_ID_OFFSET_STEP        160
335 #define HNS3_VLAN_BYTE_SIZE             8
336         struct hns3_vlan_filter_pf_cfg_cmd *req;
337         struct hns3_hw *hw = &hns->hw;
338         uint8_t vlan_offset_byte_val;
339         struct hns3_cmd_desc desc;
340         uint8_t vlan_offset_byte;
341         uint8_t vlan_offset_base;
342         int ret;
343
344         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
345
346         vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
347         vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
348                            HNS3_VLAN_BYTE_SIZE;
349         vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
350
351         req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
352         req->vlan_offset = vlan_offset_base;
353         req->vlan_cfg = on ? 0 : 1;
354         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
355
356         ret = hns3_cmd_send(hw, &desc, 1);
357         if (ret)
358                 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
359                          vlan_id, ret);
360
361         return ret;
362 }
363
364 static void
365 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
366 {
367         struct hns3_user_vlan_table *vlan_entry;
368         struct hns3_pf *pf = &hns->pf;
369
370         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
371                 if (vlan_entry->vlan_id == vlan_id) {
372                         if (vlan_entry->hd_tbl_status)
373                                 hns3_set_port_vlan_filter(hns, vlan_id, 0);
374                         LIST_REMOVE(vlan_entry, next);
375                         rte_free(vlan_entry);
376                         break;
377                 }
378         }
379 }
380
381 static void
382 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
383                         bool writen_to_tbl)
384 {
385         struct hns3_user_vlan_table *vlan_entry;
386         struct hns3_hw *hw = &hns->hw;
387         struct hns3_pf *pf = &hns->pf;
388
389         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
390                 if (vlan_entry->vlan_id == vlan_id)
391                         return;
392         }
393
394         vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
395         if (vlan_entry == NULL) {
396                 hns3_err(hw, "Failed to malloc hns3 vlan table");
397                 return;
398         }
399
400         vlan_entry->hd_tbl_status = writen_to_tbl;
401         vlan_entry->vlan_id = vlan_id;
402
403         LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
404 }
405
406 static int
407 hns3_restore_vlan_table(struct hns3_adapter *hns)
408 {
409         struct hns3_user_vlan_table *vlan_entry;
410         struct hns3_hw *hw = &hns->hw;
411         struct hns3_pf *pf = &hns->pf;
412         uint16_t vlan_id;
413         int ret = 0;
414
415         if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
416                 return hns3_vlan_pvid_configure(hns,
417                                                 hw->port_base_vlan_cfg.pvid, 1);
418
419         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
420                 if (vlan_entry->hd_tbl_status) {
421                         vlan_id = vlan_entry->vlan_id;
422                         ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
423                         if (ret)
424                                 break;
425                 }
426         }
427
428         return ret;
429 }
430
431 static int
432 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
433 {
434         struct hns3_hw *hw = &hns->hw;
435         bool writen_to_tbl = false;
436         int ret = 0;
437
438         /*
439          * When vlan filter is enabled, hardware regards packets without vlan
440          * as packets with vlan 0. So, to receive packets without vlan, vlan id
441          * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
442          */
443         if (on == 0 && vlan_id == 0)
444                 return 0;
445
446         /*
447          * When port base vlan enabled, we use port base vlan as the vlan
448          * filter condition. In this case, we don't update vlan filter table
449          * when user add new vlan or remove exist vlan, just update the
450          * vlan list. The vlan id in vlan list will be written in vlan filter
451          * table until port base vlan disabled
452          */
453         if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
454                 ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
455                 writen_to_tbl = true;
456         }
457
458         if (ret == 0) {
459                 if (on)
460                         hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
461                 else
462                         hns3_rm_dev_vlan_table(hns, vlan_id);
463         }
464         return ret;
465 }
466
467 static int
468 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
469 {
470         struct hns3_adapter *hns = dev->data->dev_private;
471         struct hns3_hw *hw = &hns->hw;
472         int ret;
473
474         rte_spinlock_lock(&hw->lock);
475         ret = hns3_vlan_filter_configure(hns, vlan_id, on);
476         rte_spinlock_unlock(&hw->lock);
477         return ret;
478 }
479
/*
 * Program the VLAN TPID used by Rx parsing and Tx insertion.
 * Only the standard TPID (RTE_ETHER_TYPE_VLAN, 0x8100) is accepted; the
 * request is rejected with -EINVAL for any other type or tpid value.
 */
static int
hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
{
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
		return -EINVAL;
	}

	/* First command: Rx vlan type recognition. */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
		/*
		 * NOTE(review): the inner case also rewrites the outer TPID
		 * fields — presumably because the descriptor carries all four
		 * fields at once; confirm against the command layout.
		 */
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);
		return ret;
	}

	/* Second command: Tx vlan insertion type. */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);
	return ret;
}
533
534 static int
535 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
536                    uint16_t tpid)
537 {
538         struct hns3_adapter *hns = dev->data->dev_private;
539         struct hns3_hw *hw = &hns->hw;
540         int ret;
541
542         rte_spinlock_lock(&hw->lock);
543         ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
544         rte_spinlock_unlock(&hw->lock);
545         return ret;
546 }
547
548 static int
549 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
550                              struct hns3_rx_vtag_cfg *vcfg)
551 {
552         struct hns3_vport_vtag_rx_cfg_cmd *req;
553         struct hns3_hw *hw = &hns->hw;
554         struct hns3_cmd_desc desc;
555         uint16_t vport_id;
556         uint8_t bitmap;
557         int ret;
558
559         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
560
561         req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
562         hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
563                      vcfg->strip_tag1_en ? 1 : 0);
564         hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
565                      vcfg->strip_tag2_en ? 1 : 0);
566         hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
567                      vcfg->vlan1_vlan_prionly ? 1 : 0);
568         hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
569                      vcfg->vlan2_vlan_prionly ? 1 : 0);
570
571         /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
572         hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
573                      vcfg->strip_tag1_discard_en ? 1 : 0);
574         hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
575                      vcfg->strip_tag2_discard_en ? 1 : 0);
576         /*
577          * In current version VF is not supported when PF is driven by DPDK
578          * driver, just need to configure parameters for PF vport.
579          */
580         vport_id = HNS3_PF_FUNC_ID;
581         req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
582         bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
583         req->vf_bitmap[req->vf_offset] = bitmap;
584
585         ret = hns3_cmd_send(hw, &desc, 1);
586         if (ret)
587                 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
588         return ret;
589 }
590
591 static void
592 hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
593                            struct hns3_rx_vtag_cfg *vcfg)
594 {
595         struct hns3_pf *pf = &hns->pf;
596         memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
597 }
598
599 static void
600 hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
601                            struct hns3_tx_vtag_cfg *vcfg)
602 {
603         struct hns3_pf *pf = &hns->pf;
604         memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
605 }
606
607 static int
608 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
609 {
610         struct hns3_rx_vtag_cfg rxvlan_cfg;
611         struct hns3_hw *hw = &hns->hw;
612         int ret;
613
614         if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
615                 rxvlan_cfg.strip_tag1_en = false;
616                 rxvlan_cfg.strip_tag2_en = enable;
617                 rxvlan_cfg.strip_tag2_discard_en = false;
618         } else {
619                 rxvlan_cfg.strip_tag1_en = enable;
620                 rxvlan_cfg.strip_tag2_en = true;
621                 rxvlan_cfg.strip_tag2_discard_en = true;
622         }
623
624         rxvlan_cfg.strip_tag1_discard_en = false;
625         rxvlan_cfg.vlan1_vlan_prionly = false;
626         rxvlan_cfg.vlan2_vlan_prionly = false;
627         rxvlan_cfg.rx_vlan_offload_en = enable;
628
629         ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
630         if (ret) {
631                 hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
632                                 enable ? "enable" : "disable", ret);
633                 return ret;
634         }
635
636         hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
637
638         return ret;
639 }
640
641 static int
642 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
643                           uint8_t fe_type, bool filter_en, uint8_t vf_id)
644 {
645         struct hns3_vlan_filter_ctrl_cmd *req;
646         struct hns3_cmd_desc desc;
647         int ret;
648
649         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
650
651         req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
652         req->vlan_type = vlan_type;
653         req->vlan_fe = filter_en ? fe_type : 0;
654         req->vf_id = vf_id;
655
656         ret = hns3_cmd_send(hw, &desc, 1);
657         if (ret)
658                 hns3_err(hw, "set vlan filter fail, ret =%d", ret);
659
660         return ret;
661 }
662
663 static int
664 hns3_vlan_filter_init(struct hns3_adapter *hns)
665 {
666         struct hns3_hw *hw = &hns->hw;
667         int ret;
668
669         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
670                                         HNS3_FILTER_FE_EGRESS, false,
671                                         HNS3_PF_FUNC_ID);
672         if (ret) {
673                 hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
674                 return ret;
675         }
676
677         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
678                                         HNS3_FILTER_FE_INGRESS, false,
679                                         HNS3_PF_FUNC_ID);
680         if (ret)
681                 hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);
682
683         return ret;
684 }
685
686 static int
687 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
688 {
689         struct hns3_hw *hw = &hns->hw;
690         int ret;
691
692         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
693                                         HNS3_FILTER_FE_INGRESS, enable,
694                                         HNS3_PF_FUNC_ID);
695         if (ret)
696                 hns3_err(hw, "failed to %s port vlan filter, ret = %d",
697                          enable ? "enable" : "disable", ret);
698
699         return ret;
700 }
701
702 static int
703 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
704 {
705         struct hns3_adapter *hns = dev->data->dev_private;
706         struct hns3_hw *hw = &hns->hw;
707         struct rte_eth_rxmode *rxmode;
708         unsigned int tmp_mask;
709         bool enable;
710         int ret = 0;
711
712         rte_spinlock_lock(&hw->lock);
713         rxmode = &dev->data->dev_conf.rxmode;
714         tmp_mask = (unsigned int)mask;
715         if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
716                 /* ignore vlan filter configuration during promiscuous mode */
717                 if (!dev->data->promiscuous) {
718                         /* Enable or disable VLAN filter */
719                         enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
720                                  true : false;
721
722                         ret = hns3_enable_vlan_filter(hns, enable);
723                         if (ret) {
724                                 rte_spinlock_unlock(&hw->lock);
725                                 hns3_err(hw, "failed to %s rx filter, ret = %d",
726                                          enable ? "enable" : "disable", ret);
727                                 return ret;
728                         }
729                 }
730         }
731
732         if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
733                 /* Enable or disable VLAN stripping */
734                 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
735                     true : false;
736
737                 ret = hns3_en_hw_strip_rxvtag(hns, enable);
738                 if (ret) {
739                         rte_spinlock_unlock(&hw->lock);
740                         hns3_err(hw, "failed to %s rx strip, ret = %d",
741                                  enable ? "enable" : "disable", ret);
742                         return ret;
743                 }
744         }
745
746         rte_spinlock_unlock(&hw->lock);
747
748         return ret;
749 }
750
/*
 * Push the Tx vlan-tag handling configuration (accept/insert bits and
 * default tags for tag1/tag2) to the PF vport via VLAN_PORT_TX_CFG.
 */
static int
hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
{
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;
	uint16_t vport_id;
	uint8_t bitmap;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
	hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
		     vcfg->tag_shift_mode_en ? 1 : 0);

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

	return ret;
}
800
801 static int
802 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
803                      uint16_t pvid)
804 {
805         struct hns3_hw *hw = &hns->hw;
806         struct hns3_tx_vtag_cfg txvlan_cfg;
807         int ret;
808
809         if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
810                 txvlan_cfg.accept_tag1 = true;
811                 txvlan_cfg.insert_tag1_en = false;
812                 txvlan_cfg.default_tag1 = 0;
813         } else {
814                 txvlan_cfg.accept_tag1 =
815                         hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
816                 txvlan_cfg.insert_tag1_en = true;
817                 txvlan_cfg.default_tag1 = pvid;
818         }
819
820         txvlan_cfg.accept_untag1 = true;
821         txvlan_cfg.accept_tag2 = true;
822         txvlan_cfg.accept_untag2 = true;
823         txvlan_cfg.insert_tag2_en = false;
824         txvlan_cfg.default_tag2 = 0;
825         txvlan_cfg.tag_shift_mode_en = true;
826
827         ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
828         if (ret) {
829                 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
830                          ret);
831                 return ret;
832         }
833
834         hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
835         return ret;
836 }
837
838
839 static void
840 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
841 {
842         struct hns3_user_vlan_table *vlan_entry;
843         struct hns3_pf *pf = &hns->pf;
844
845         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
846                 if (vlan_entry->hd_tbl_status) {
847                         hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
848                         vlan_entry->hd_tbl_status = false;
849                 }
850         }
851
852         if (is_del_list) {
853                 vlan_entry = LIST_FIRST(&pf->vlan_list);
854                 while (vlan_entry) {
855                         LIST_REMOVE(vlan_entry, next);
856                         rte_free(vlan_entry);
857                         vlan_entry = LIST_FIRST(&pf->vlan_list);
858                 }
859         }
860 }
861
862 static void
863 hns3_add_all_vlan_table(struct hns3_adapter *hns)
864 {
865         struct hns3_user_vlan_table *vlan_entry;
866         struct hns3_pf *pf = &hns->pf;
867
868         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
869                 if (!vlan_entry->hd_tbl_status) {
870                         hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
871                         vlan_entry->hd_tbl_status = true;
872                 }
873         }
874 }
875
876 static void
877 hns3_remove_all_vlan_table(struct hns3_adapter *hns)
878 {
879         struct hns3_hw *hw = &hns->hw;
880         int ret;
881
882         hns3_rm_all_vlan_table(hns, true);
883         if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
884                 ret = hns3_set_port_vlan_filter(hns,
885                                                 hw->port_base_vlan_cfg.pvid, 0);
886                 if (ret) {
887                         hns3_err(hw, "Failed to remove all vlan table, ret =%d",
888                                  ret);
889                         return;
890                 }
891         }
892 }
893
894 static int
895 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
896                         uint16_t port_base_vlan_state, uint16_t new_pvid)
897 {
898         struct hns3_hw *hw = &hns->hw;
899         uint16_t old_pvid;
900         int ret;
901
902         if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
903                 old_pvid = hw->port_base_vlan_cfg.pvid;
904                 if (old_pvid != HNS3_INVALID_PVID) {
905                         ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
906                         if (ret) {
907                                 hns3_err(hw, "failed to remove old pvid %u, "
908                                                 "ret = %d", old_pvid, ret);
909                                 return ret;
910                         }
911                 }
912
913                 hns3_rm_all_vlan_table(hns, false);
914                 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
915                 if (ret) {
916                         hns3_err(hw, "failed to add new pvid %u, ret = %d",
917                                         new_pvid, ret);
918                         return ret;
919                 }
920         } else {
921                 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
922                 if (ret) {
923                         hns3_err(hw, "failed to remove pvid %u, ret = %d",
924                                         new_pvid, ret);
925                         return ret;
926                 }
927
928                 hns3_add_all_vlan_table(hns);
929         }
930         return 0;
931 }
932
933 static int
934 hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
935 {
936         struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
937         struct hns3_rx_vtag_cfg rx_vlan_cfg;
938         bool rx_strip_en;
939         int ret;
940
941         rx_strip_en = old_cfg->rx_vlan_offload_en;
942         if (on) {
943                 rx_vlan_cfg.strip_tag1_en = rx_strip_en;
944                 rx_vlan_cfg.strip_tag2_en = true;
945                 rx_vlan_cfg.strip_tag2_discard_en = true;
946         } else {
947                 rx_vlan_cfg.strip_tag1_en = false;
948                 rx_vlan_cfg.strip_tag2_en = rx_strip_en;
949                 rx_vlan_cfg.strip_tag2_discard_en = false;
950         }
951         rx_vlan_cfg.strip_tag1_discard_en = false;
952         rx_vlan_cfg.vlan1_vlan_prionly = false;
953         rx_vlan_cfg.vlan2_vlan_prionly = false;
954         rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;
955
956         ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
957         if (ret)
958                 return ret;
959
960         hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
961         return ret;
962 }
963
/*
 * Enable/disable the port-based VLAN (PVID) for the PF.
 *
 * Hardware is touched in three ordered steps -- Tx tag insertion, Rx tag
 * stripping, VLAN filter table -- and on failure of a later step the
 * earlier ones are rolled back to the still-cached previous state so that
 * hardware and driver caches stay consistent.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
{
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;
	int ret, err;

	/* Disabling a PVID other than the currently set one is a no-op. */
	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);
		return 0;
	}

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);
		return ret;
	}

	ret = hns3_en_pvid_strip(hns, on);
	if (ret) {
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);
		goto pvid_vlan_strip_fail;
	}

	/* An invalid PVID requires no filter table update. */
	if (pvid == HNS3_INVALID_PVID)
		goto out;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
	if (ret) {
		hns3_err(hw, "failed to update vlan filter entries, ret = %d",
			 ret);
		goto vlan_filter_set_fail;
	}

out:
	/* Commit the new state to the driver cache only after all steps. */
	hw->port_base_vlan_cfg.state = port_base_vlan_state;
	hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
	return ret;

vlan_filter_set_fail:
	/* Roll back Rx strip using the not-yet-updated cached state. */
	err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
					HNS3_PORT_BASE_VLAN_ENABLE);
	if (err)
		hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);

pvid_vlan_strip_fail:
	/* Roll back Tx tag insertion using the cached state/pvid. */
	err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
					hw->port_base_vlan_cfg.pvid);
	if (err)
		hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);

	return ret;
}
1023
1024 static int
1025 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1026 {
1027         struct hns3_adapter *hns = dev->data->dev_private;
1028         struct hns3_hw *hw = &hns->hw;
1029         bool pvid_en_state_change;
1030         uint16_t pvid_state;
1031         int ret;
1032
1033         if (pvid > RTE_ETHER_MAX_VLAN_ID) {
1034                 hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
1035                          RTE_ETHER_MAX_VLAN_ID);
1036                 return -EINVAL;
1037         }
1038
1039         /*
1040          * If PVID configuration state change, should refresh the PVID
1041          * configuration state in struct hns3_tx_queue/hns3_rx_queue.
1042          */
1043         pvid_state = hw->port_base_vlan_cfg.state;
1044         if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
1045             (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
1046                 pvid_en_state_change = false;
1047         else
1048                 pvid_en_state_change = true;
1049
1050         rte_spinlock_lock(&hw->lock);
1051         ret = hns3_vlan_pvid_configure(hns, pvid, on);
1052         rte_spinlock_unlock(&hw->lock);
1053         if (ret)
1054                 return ret;
1055         /*
1056          * Only in HNS3_SW_SHIFT_AND_MODE the PVID related operation in Tx/Rx
1057          * need be processed by PMD.
1058          */
1059         if (pvid_en_state_change &&
1060             hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1061                 hns3_update_all_queues_pvid_proc_en(hw);
1062
1063         return 0;
1064 }
1065
1066 static int
1067 hns3_default_vlan_config(struct hns3_adapter *hns)
1068 {
1069         struct hns3_hw *hw = &hns->hw;
1070         int ret;
1071
1072         /*
1073          * When vlan filter is enabled, hardware regards packets without vlan
1074          * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
1075          * table, packets without vlan won't be received. So, add vlan 0 as
1076          * the default vlan.
1077          */
1078         ret = hns3_vlan_filter_configure(hns, 0, 1);
1079         if (ret)
1080                 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
1081         return ret;
1082 }
1083
/*
 * Initialise (or, during reset recovery, re-initialise) the PF VLAN
 * configuration: filter table, inner TPID, the PVID/Rx-strip defaults and
 * the default VLAN 0 entry. Steps that would clobber user configuration
 * are skipped while a reset is in progress; they are restored later by
 * hns3_restore_vlan_table()/hns3_restore_vlan_conf().
 */
static int
hns3_init_vlan_config(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * This function can be called in the initialization and reset process,
	 * when in reset process, it means that hardware had been reseted
	 * successfully and we need to restore the hardware configuration to
	 * ensure that the hardware configuration remains unchanged before and
	 * after reset.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
	}

	ret = hns3_vlan_filter_init(hns);
	if (ret) {
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
		return ret;
	}

	ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret) {
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
		return ret;
	}

	/*
	 * When in the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at initialization,
	 * we will restore configurations to hardware in hns3_restore_vlan_table
	 * and hns3_restore_vlan_conf later.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
		if (ret) {
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
			return ret;
		}

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret) {
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);
			return ret;
		}
	}

	return hns3_default_vlan_config(hns);
}
1138
1139 static int
1140 hns3_restore_vlan_conf(struct hns3_adapter *hns)
1141 {
1142         struct hns3_pf *pf = &hns->pf;
1143         struct hns3_hw *hw = &hns->hw;
1144         uint64_t offloads;
1145         bool enable;
1146         int ret;
1147
1148         if (!hw->data->promiscuous) {
1149                 /* restore vlan filter states */
1150                 offloads = hw->data->dev_conf.rxmode.offloads;
1151                 enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
1152                 ret = hns3_enable_vlan_filter(hns, enable);
1153                 if (ret) {
1154                         hns3_err(hw, "failed to restore vlan rx filter conf, "
1155                                  "ret = %d", ret);
1156                         return ret;
1157                 }
1158         }
1159
1160         ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
1161         if (ret) {
1162                 hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
1163                 return ret;
1164         }
1165
1166         ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
1167         if (ret)
1168                 hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);
1169
1170         return ret;
1171 }
1172
/*
 * Apply the VLAN settings held in dev->data->dev_conf at configure time:
 * Rx VLAN strip/filter offloads first, then the Tx PVID when one is
 * requested. The hw_vlan_reject_* fields are not supported by this
 * hardware and only trigger a warning.
 */
static int
hns3_dev_configure_vlan(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;
	int mask;
	int ret;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%u), "
			  "hw_vlan_reject_untagged(%u)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret) {
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * If pvid config is not set in rte_eth_conf, driver needn't to set
	 * VLAN pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
			 txmode->pvid, ret);

	return ret;
}
1218
/*
 * Program the generic TSO MSS limits (min and max) into hardware.
 *
 * NOTE(review): both the min and the max 16-bit fields are packed using
 * HNS3_TSO_MSS_MIN_M/HNS3_TSO_MSS_MIN_S. This mirrors the kernel hns3
 * driver and presumably works because both fields share the same in-field
 * bit layout -- confirm against the command reference before "fixing"
 * the second one to a MAX mask.
 */
static int
hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
{
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;
	uint16_t tso_mss;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	tso_mss = 0;
	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);

	return hns3_cmd_send(hw, &desc, 1);
}
1243
/*
 * Allocate (@is_alloc == true) or free unicast MAC table (UMV) space in
 * firmware. On allocation, the size actually granted by firmware is
 * returned through @allocated_size when that pointer is non-NULL.
 */
static int
hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
{
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	/*
	 * NOTE(review): the ALC bit is written as 0 for allocate and 1 for
	 * free, i.e. the bit appears to select "free" -- confirm against the
	 * command reference before changing.
	 */
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	/* Firmware reports the granted size in the second data word. */
	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

	return 0;
}
1269
1270 static int
1271 hns3_init_umv_space(struct hns3_hw *hw)
1272 {
1273         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1274         struct hns3_pf *pf = &hns->pf;
1275         uint16_t allocated_size = 0;
1276         int ret;
1277
1278         ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
1279                                  true);
1280         if (ret)
1281                 return ret;
1282
1283         if (allocated_size < pf->wanted_umv_size)
1284                 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
1285                              pf->wanted_umv_size, allocated_size);
1286
1287         pf->max_umv_size = (!!allocated_size) ? allocated_size :
1288                                                 pf->wanted_umv_size;
1289         pf->used_umv_size = 0;
1290         return 0;
1291 }
1292
1293 static int
1294 hns3_uninit_umv_space(struct hns3_hw *hw)
1295 {
1296         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1297         struct hns3_pf *pf = &hns->pf;
1298         int ret;
1299
1300         if (pf->max_umv_size == 0)
1301                 return 0;
1302
1303         ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
1304         if (ret)
1305                 return ret;
1306
1307         pf->max_umv_size = 0;
1308
1309         return 0;
1310 }
1311
1312 static bool
1313 hns3_is_umv_space_full(struct hns3_hw *hw)
1314 {
1315         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1316         struct hns3_pf *pf = &hns->pf;
1317         bool is_full;
1318
1319         is_full = (pf->used_umv_size >= pf->max_umv_size);
1320
1321         return is_full;
1322 }
1323
1324 static void
1325 hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
1326 {
1327         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1328         struct hns3_pf *pf = &hns->pf;
1329
1330         if (is_free) {
1331                 if (pf->used_umv_size > 0)
1332                         pf->used_umv_size--;
1333         } else
1334                 pf->used_umv_size++;
1335 }
1336
1337 static void
1338 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
1339                       const uint8_t *addr, bool is_mc)
1340 {
1341         const unsigned char *mac_addr = addr;
1342         uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
1343                             ((uint32_t)mac_addr[2] << 16) |
1344                             ((uint32_t)mac_addr[1] << 8) |
1345                             (uint32_t)mac_addr[0];
1346         uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
1347
1348         hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1349         if (is_mc) {
1350                 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1351                 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
1352                 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1353         }
1354
1355         new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
1356         new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
1357 }
1358
1359 static int
1360 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
1361                              uint8_t resp_code,
1362                              enum hns3_mac_vlan_tbl_opcode op)
1363 {
1364         if (cmdq_resp) {
1365                 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
1366                          cmdq_resp);
1367                 return -EIO;
1368         }
1369
1370         if (op == HNS3_MAC_VLAN_ADD) {
1371                 if (resp_code == 0 || resp_code == 1) {
1372                         return 0;
1373                 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
1374                         hns3_err(hw, "add mac addr failed for uc_overflow");
1375                         return -ENOSPC;
1376                 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
1377                         hns3_err(hw, "add mac addr failed for mc_overflow");
1378                         return -ENOSPC;
1379                 }
1380
1381                 hns3_err(hw, "add mac addr failed for undefined, code=%u",
1382                          resp_code);
1383                 return -EIO;
1384         } else if (op == HNS3_MAC_VLAN_REMOVE) {
1385                 if (resp_code == 0) {
1386                         return 0;
1387                 } else if (resp_code == 1) {
1388                         hns3_dbg(hw, "remove mac addr failed for miss");
1389                         return -ENOENT;
1390                 }
1391
1392                 hns3_err(hw, "remove mac addr failed for undefined, code=%u",
1393                          resp_code);
1394                 return -EIO;
1395         } else if (op == HNS3_MAC_VLAN_LKUP) {
1396                 if (resp_code == 0) {
1397                         return 0;
1398                 } else if (resp_code == 1) {
1399                         hns3_dbg(hw, "lookup mac addr failed for miss");
1400                         return -ENOENT;
1401                 }
1402
1403                 hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
1404                          resp_code);
1405                 return -EIO;
1406         }
1407
1408         hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
1409                  op);
1410
1411         return -EINVAL;
1412 }
1413
/*
 * Look up @req's entry in the hardware MAC_VLAN table.
 *
 * A multicast lookup (HNS3_MC_MAC_VLAN_OPS_DESC_NUM descriptors) chains
 * all but the last descriptor with HNS3_CMD_FLAG_NEXT and carries the
 * entry in the first one; a unicast lookup uses a single descriptor.
 *
 * Returns 0 on hit, -ENOENT on miss (via hns3_get_mac_vlan_cmd_status),
 * other negative errno on command failure.
 */
static int
hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int ret;
	int i;

	if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
		for (i = 0; i < desc_num - 1; i++) {
			hns3_cmd_setup_basic_desc(&desc[i],
						  HNS3_OPC_MAC_VLAN_ADD, true);
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
			if (i == 0)
				memcpy(desc[i].data, req,
				sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		}
		/* Last descriptor of the chain: no NEXT flag. */
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
					  true);
	} else {
		hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
	}
	ret = hns3_cmd_send(hw, desc, desc_num);
	if (ret) {
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);
		return ret;
	}
	/* Byte 1 of data[0] carries the per-entry response code. */
	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);
}
1453
/*
 * Add @req's entry to the hardware MAC_VLAN table.
 *
 * A unicast add (HNS3_UC_MAC_VLAN_OPS_DESC_NUM) builds one fresh
 * descriptor. Otherwise the caller-provided descriptors are reused
 * (hns3_cmd_reuse_desc -- presumably primed by a preceding lookup, see
 * hns3_add_uc_mac_addr) and re-chained with HNS3_CMD_FLAG_NEXT on all
 * but the last one.
 *
 * Returns 0 on success, negative errno on command or entry failure.
 */
static int
hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *desc, uint8_t desc_num)
{
	uint8_t resp_code;
	uint16_t retval;
	int cfg_status;
	int ret;
	int i;

	if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
		hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc->data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc->retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		for (i = 0; i < desc_num; i++) {
			hns3_cmd_reuse_desc(&desc[i], false);
			if (i == desc_num - 1)
				desc[i].flag &=
					rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
			else
				desc[i].flag |=
					rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		}
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		desc[0].retval = 0;
		ret = hns3_cmd_send(hw, desc, desc_num);
		resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}

	/* A transport-level send failure takes precedence over cfg_status. */
	if (ret) {
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
		return ret;
	}

	return cfg_status;
}
1503
1504 static int
1505 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1506                          struct hns3_mac_vlan_tbl_entry_cmd *req)
1507 {
1508         struct hns3_cmd_desc desc;
1509         uint8_t resp_code;
1510         uint16_t retval;
1511         int ret;
1512
1513         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1514
1515         memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1516
1517         ret = hns3_cmd_send(hw, &desc, 1);
1518         if (ret) {
1519                 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1520                 return ret;
1521         }
1522         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1523         retval = rte_le_to_cpu_16(desc.retval);
1524
1525         return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1526                                             HNS3_MAC_VLAN_REMOVE);
1527 }
1528
/*
 * Add a unicast MAC address for the PF to the hardware MAC_VLAN table.
 *
 * The address is first looked up and only added when absent (repeated
 * unicast entries are not allowed) and when the reserved unicast table
 * (UMV) space is not exhausted; an address already in the table is
 * treated as success.
 *
 * Returns 0 on success, -EINVAL for an invalid address, -ENOSPC when the
 * UMV space is full, other negative errno on command failure.
 */
static int
hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;
	uint8_t vf_id;
	int ret;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	/*
	 * In current version VF is not supported when PF is driven by DPDK
	 * driver, just need to configure parameters for PF vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
					HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
						HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
			/* Account for the new entry in the UMV usage. */
			if (!ret)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);

		return -ENOSPC;
	}

	hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

	return ret;
}
1598
1599 static int
1600 hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1601 {
1602         struct hns3_mac_vlan_tbl_entry_cmd req;
1603         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1604         int ret;
1605
1606         /* check if mac addr is valid */
1607         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1608                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1609                                       mac_addr);
1610                 hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
1611                          mac_str);
1612                 return -EINVAL;
1613         }
1614
1615         memset(&req, 0, sizeof(req));
1616         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1617         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1618         ret = hns3_remove_mac_vlan_tbl(hw, &req);
1619         if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
1620                 return 0;
1621         else if (ret == 0)
1622                 hns3_update_umv_space(hw, true);
1623
1624         return ret;
1625 }
1626
/*
 * .mac_addr_set ops: replace the port's default MAC address.
 *
 * Under hw->lock: delete the old unicast address, add the new one, and
 * update the MAC pause (flow control) address to match. Failures roll
 * back the already-applied steps so hardware stays consistent; the
 * cached hw->mac.mac_addr is only updated after full success.
 *
 * Returns 0 on success, negative errno on failure (the original error is
 * returned even if rollback itself also fails).
 */
static int
hns3_set_default_mac_addr(struct rte_eth_dev *dev,
                          struct rte_ether_addr *mac_addr)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_ether_addr *oaddr;
        char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
        int ret, ret_val;

        rte_spinlock_lock(&hw->lock);
        /* Current default address as cached by the driver. */
        oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
        ret = hw->ops.del_uc_mac_addr(hw, oaddr);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      oaddr);
                hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
                          mac_str, ret);

                rte_spinlock_unlock(&hw->lock);
                return ret;
        }

        ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
        if (ret) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
                /* Old address already deleted: restore it before returning. */
                goto err_add_uc_addr;
        }

        ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
        if (ret) {
                hns3_err(hw, "Failed to configure mac pause address: %d", ret);
                /* New address already added: remove it, then restore the old. */
                goto err_pause_addr_cfg;
        }

        rte_ether_addr_copy(mac_addr,
                            (struct rte_ether_addr *)hw->mac.mac_addr);
        rte_spinlock_unlock(&hw->lock);

        return 0;

err_pause_addr_cfg:
        ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
        if (ret_val) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
                                      mac_addr);
                hns3_warn(hw,
                          "Failed to roll back to del setted mac addr(%s): %d",
                          mac_str, ret_val);
        }

err_add_uc_addr:
        ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
        if (ret_val) {
                hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
                hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
                                  mac_str, ret_val);
        }
        rte_spinlock_unlock(&hw->lock);

        return ret;
}
1690
1691 static void
1692 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1693 {
1694 #define HNS3_VF_NUM_IN_FIRST_DESC 192
1695         uint8_t word_num;
1696         uint8_t bit_num;
1697
1698         if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1699                 word_num = vfid / 32;
1700                 bit_num = vfid % 32;
1701                 if (clr)
1702                         desc[1].data[word_num] &=
1703                             rte_cpu_to_le_32(~(1UL << bit_num));
1704                 else
1705                         desc[1].data[word_num] |=
1706                             rte_cpu_to_le_32(1UL << bit_num);
1707         } else {
1708                 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
1709                 bit_num = vfid % 32;
1710                 if (clr)
1711                         desc[2].data[word_num] &=
1712                             rte_cpu_to_le_32(~(1UL << bit_num));
1713                 else
1714                         desc[2].data[word_num] |=
1715                             rte_cpu_to_le_32(1UL << bit_num);
1716         }
1717 }
1718
1719 static int
1720 hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1721 {
1722         struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
1723         struct hns3_mac_vlan_tbl_entry_cmd req;
1724         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1725         uint8_t vf_id;
1726         int ret;
1727
1728         /* Check if mac addr is valid */
1729         if (!rte_is_multicast_ether_addr(mac_addr)) {
1730                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1731                                       mac_addr);
1732                 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
1733                          mac_str);
1734                 return -EINVAL;
1735         }
1736
1737         memset(&req, 0, sizeof(req));
1738         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1739         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1740         ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1741                                         HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1742         if (ret) {
1743                 /* This mac addr do not exist, add new entry for it */
1744                 memset(desc[0].data, 0, sizeof(desc[0].data));
1745                 memset(desc[1].data, 0, sizeof(desc[0].data));
1746                 memset(desc[2].data, 0, sizeof(desc[0].data));
1747         }
1748
1749         /*
1750          * In current version VF is not supported when PF is driven by DPDK
1751          * driver, just need to configure parameters for PF vport.
1752          */
1753         vf_id = HNS3_PF_FUNC_ID;
1754         hns3_update_desc_vfid(desc, vf_id, false);
1755         ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
1756                                         HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1757         if (ret) {
1758                 if (ret == -ENOSPC)
1759                         hns3_err(hw, "mc mac vlan table is full");
1760                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1761                                       mac_addr);
1762                 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
1763         }
1764
1765         return ret;
1766 }
1767
1768 static int
1769 hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1770 {
1771         struct hns3_mac_vlan_tbl_entry_cmd req;
1772         struct hns3_cmd_desc desc[3];
1773         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1774         uint8_t vf_id;
1775         int ret;
1776
1777         /* Check if mac addr is valid */
1778         if (!rte_is_multicast_ether_addr(mac_addr)) {
1779                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1780                                       mac_addr);
1781                 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
1782                          mac_str);
1783                 return -EINVAL;
1784         }
1785
1786         memset(&req, 0, sizeof(req));
1787         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1788         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1789         ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1790                                         HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1791         if (ret == 0) {
1792                 /*
1793                  * This mac addr exist, remove this handle's VFID for it.
1794                  * In current version VF is not supported when PF is driven by
1795                  * DPDK driver, just need to configure parameters for PF vport.
1796                  */
1797                 vf_id = HNS3_PF_FUNC_ID;
1798                 hns3_update_desc_vfid(desc, vf_id, true);
1799
1800                 /* All the vfid is zero, so need to delete this entry */
1801                 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1802         } else if (ret == -ENOENT) {
1803                 /* This mac addr doesn't exist. */
1804                 return 0;
1805         }
1806
1807         if (ret) {
1808                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1809                                       mac_addr);
1810                 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
1811         }
1812
1813         return ret;
1814 }
1815
1816 static int
1817 hns3_check_mq_mode(struct rte_eth_dev *dev)
1818 {
1819         enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1820         enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1821         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1822         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1823         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1824         struct rte_eth_dcb_tx_conf *dcb_tx_conf;
1825         uint8_t num_tc;
1826         int max_tc = 0;
1827         int i;
1828
1829         if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
1830             (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
1831              tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
1832                 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
1833                          rx_mq_mode, tx_mq_mode);
1834                 return -EOPNOTSUPP;
1835         }
1836
1837         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1838         dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
1839         if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
1840                 if (dcb_rx_conf->nb_tcs > pf->tc_max) {
1841                         hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
1842                                  dcb_rx_conf->nb_tcs, pf->tc_max);
1843                         return -EINVAL;
1844                 }
1845
1846                 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
1847                       dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
1848                         hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
1849                                  "nb_tcs(%d) != %d or %d in rx direction.",
1850                                  dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
1851                         return -EINVAL;
1852                 }
1853
1854                 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
1855                         hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
1856                                  dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
1857                         return -EINVAL;
1858                 }
1859
1860                 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1861                         if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
1862                                 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
1863                                          "is not equal to one in tx direction.",
1864                                          i, dcb_rx_conf->dcb_tc[i]);
1865                                 return -EINVAL;
1866                         }
1867                         if (dcb_rx_conf->dcb_tc[i] > max_tc)
1868                                 max_tc = dcb_rx_conf->dcb_tc[i];
1869                 }
1870
1871                 num_tc = max_tc + 1;
1872                 if (num_tc > dcb_rx_conf->nb_tcs) {
1873                         hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
1874                                  num_tc, dcb_rx_conf->nb_tcs);
1875                         return -EINVAL;
1876                 }
1877         }
1878
1879         return 0;
1880 }
1881
/*
 * Map (en == true) or unmap (en == false) one Rx/Tx ring to an MSI-X
 * interrupt vector via the ADD/DEL_RING_TO_VECTOR firmware command.
 *
 * @param vector_id   interrupt vector index; carried in two command fields.
 * @param queue_type  HNS3_RING_TYPE_RX or HNS3_RING_TYPE_TX.
 * @param queue_id    TQP index to (un)bind.
 * @return 0 on success, negative errno on command failure.
 */
static int
hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
                           enum hns3_ring_type queue_type, uint16_t queue_id)
{
        struct hns3_cmd_desc desc;
        struct hns3_ctrl_vector_chain_cmd *req =
                (struct hns3_ctrl_vector_chain_cmd *)desc.data;
        enum hns3_opcode_type op;
        uint16_t tqp_type_and_id = 0;
        uint16_t type;
        uint16_t gl;
        int ret;

        op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
        hns3_cmd_setup_basic_desc(&desc, op, false);
        /* The vector id is split into low and high command fields. */
        req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
                                              HNS3_TQP_INT_ID_L_S);
        req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
                                              HNS3_TQP_INT_ID_H_S);

        /* Rx and Tx rings use different GL indexes. */
        if (queue_type == HNS3_RING_TYPE_RX)
                gl = HNS3_RING_GL_RX;
        else
                gl = HNS3_RING_GL_TX;

        type = queue_type;

        /* Pack ring type, TQP id and GL index into one 16-bit field. */
        hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
                       type);
        hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
        hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
                       gl);
        req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
        /* Only one ring is chained per command here. */
        req->int_cause_num = 1;
        ret = hns3_cmd_send(hw, &desc, 1);
        if (ret) {
                hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
                         en ? "Map" : "Unmap", queue_id, vector_id, ret);
                return ret;
        }

        return 0;
}
1925
1926 static int
1927 hns3_setup_dcb(struct rte_eth_dev *dev)
1928 {
1929         struct hns3_adapter *hns = dev->data->dev_private;
1930         struct hns3_hw *hw = &hns->hw;
1931         int ret;
1932
1933         if (!hns3_dev_get_support(hw, DCB)) {
1934                 hns3_err(hw, "this port does not support dcb configurations.");
1935                 return -EOPNOTSUPP;
1936         }
1937
1938         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
1939                 hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
1940                 return -EOPNOTSUPP;
1941         }
1942
1943         ret = hns3_dcb_configure(hns);
1944         if (ret)
1945                 hns3_err(hw, "failed to config dcb: %d", ret);
1946
1947         return ret;
1948 }
1949
1950 static int
1951 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
1952 {
1953         int ret;
1954
1955         /*
1956          * Some hardware doesn't support auto-negotiation, but users may not
1957          * configure link_speeds (default 0), which means auto-negotiation.
1958          * In this case, it should return success.
1959          */
1960         if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
1961             hw->mac.support_autoneg == 0)
1962                 return 0;
1963
1964         if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
1965                 ret = hns3_check_port_speed(hw, link_speeds);
1966                 if (ret)
1967                         return ret;
1968         }
1969
1970         return 0;
1971 }
1972
1973 static int
1974 hns3_check_dev_conf(struct rte_eth_dev *dev)
1975 {
1976         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1977         struct rte_eth_conf *conf = &dev->data->dev_conf;
1978         int ret;
1979
1980         ret = hns3_check_mq_mode(dev);
1981         if (ret)
1982                 return ret;
1983
1984         return hns3_check_link_speed(hw, conf->link_speeds);
1985 }
1986
/*
 * .dev_configure ops: validate and apply the device configuration.
 *
 * Sets up fake queues when Rx/Tx queue counts differ (needed by hardware
 * that cannot enable/disable Rx and Tx queues independently), then applies
 * DCB, RSS, MTU, Rx timestamp registration, VLAN and GRO settings. On any
 * failure the fake-queue setup is undone and the adapter state falls back
 * to HNS3_NIC_INITIALIZED.
 */
static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct rte_eth_conf *conf = &dev->data->dev_conf;
        enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
        struct hns3_hw *hw = &hns->hw;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
        bool gro_en;
        int ret;

        hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);

        /*
         * Some versions of hardware network engine does not support
         * individually enable/disable/reset the Tx or Rx queue. These devices
         * must enable/disable/reset Tx and Rx queues at the same time. When the
         * numbers of Tx queues allocated by upper applications are not equal to
         * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues
         * to adjust numbers of Tx/Rx queues. otherwise, network engine can not
         * work as usual. But these fake queues are imperceptible, and can not
         * be used by upper applications.
         */
        ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
        if (ret) {
                hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
                hw->cfg_max_queues = 0;
                return ret;
        }

        hw->adapter_state = HNS3_NIC_CONFIGURING;
        ret = hns3_check_dev_conf(dev);
        if (ret)
                goto cfg_err;

        /* DCB is only configured when explicitly requested via mq_mode. */
        if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
                ret = hns3_setup_dcb(dev);
                if (ret)
                        goto cfg_err;
        }

        /* When RSS is not configured, redirect the packet queue 0 */
        if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
                rss_conf = conf->rx_adv_conf.rss_conf;
                hw->rss_dis_flag = false;
                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
                if (ret)
                        goto cfg_err;
        }

        ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
        if (ret != 0)
                goto cfg_err;

        ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
        if (ret)
                goto cfg_err;

        ret = hns3_dev_configure_vlan(dev);
        if (ret)
                goto cfg_err;

        /* config hardware GRO */
        gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
        ret = hns3_config_gro(hw, gro_en);
        if (ret)
                goto cfg_err;

        hns3_init_rx_ptype_tble(dev);
        hw->adapter_state = HNS3_NIC_CONFIGURED;

        return 0;

cfg_err:
        /* Roll back the fake-queue setup and leave the CONFIGURING state. */
        hw->cfg_max_queues = 0;
        (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
        hw->adapter_state = HNS3_NIC_INITIALIZED;

        return ret;
}
2070
2071 static int
2072 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
2073 {
2074         struct hns3_config_max_frm_size_cmd *req;
2075         struct hns3_cmd_desc desc;
2076
2077         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
2078
2079         req = (struct hns3_config_max_frm_size_cmd *)desc.data;
2080         req->max_frm_size = rte_cpu_to_le_16(new_mps);
2081         req->min_frm_size = RTE_ETHER_MIN_LEN;
2082
2083         return hns3_cmd_send(hw, &desc, 1);
2084 }
2085
/*
 * Update the MAC frame size (MPS) and re-provision packet buffers to
 * match. If buffer allocation fails, the previous MPS is restored in
 * hardware; if that rollback also fails, the cached pf.mps is left at the
 * new value, which then matches what hardware actually holds.
 *
 * @param mps  new maximum packet size to program.
 * @return 0 on success, negative errno of the first failing step.
 */
static int
hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        uint16_t original_mps = hns->pf.mps;
        int err;
        int ret;

        ret = hns3_set_mac_mtu(hw, mps);
        if (ret) {
                hns3_err(hw, "failed to set mtu, ret = %d", ret);
                return ret;
        }

        /* Cache the new MPS before resizing the packet buffers. */
        hns->pf.mps = mps;
        ret = hns3_buffer_alloc(hw);
        if (ret) {
                hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
                goto rollback;
        }

        return 0;

rollback:
        err = hns3_set_mac_mtu(hw, original_mps);
        if (err) {
                hns3_err(hw, "fail to rollback MTU, err = %d", err);
                /* Hardware kept the new MPS, so pf.mps stays at "mps". */
                return ret;
        }
        hns->pf.mps = original_mps;

        return ret;
}
2119
2120 static int
2121 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2122 {
2123         struct hns3_adapter *hns = dev->data->dev_private;
2124         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
2125         struct hns3_hw *hw = &hns->hw;
2126         int ret;
2127
2128         if (dev->data->dev_started) {
2129                 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
2130                          "before configuration", dev->data->port_id);
2131                 return -EBUSY;
2132         }
2133
2134         rte_spinlock_lock(&hw->lock);
2135         frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
2136
2137         /*
2138          * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely
2139          * assign to "uint16_t" type variable.
2140          */
2141         ret = hns3_config_mtu(hw, (uint16_t)frame_size);
2142         if (ret) {
2143                 rte_spinlock_unlock(&hw->lock);
2144                 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
2145                          dev->data->port_id, mtu, ret);
2146                 return ret;
2147         }
2148
2149         rte_spinlock_unlock(&hw->lock);
2150
2151         return 0;
2152 }
2153
2154 static uint32_t
2155 hns3_get_copper_port_speed_capa(uint32_t supported_speed)
2156 {
2157         uint32_t speed_capa = 0;
2158
2159         if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
2160                 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
2161         if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
2162                 speed_capa |= RTE_ETH_LINK_SPEED_10M;
2163         if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
2164                 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
2165         if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
2166                 speed_capa |= RTE_ETH_LINK_SPEED_100M;
2167         if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
2168                 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2169
2170         return speed_capa;
2171 }
2172
2173 static uint32_t
2174 hns3_get_firber_port_speed_capa(uint32_t supported_speed)
2175 {
2176         uint32_t speed_capa = 0;
2177
2178         if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
2179                 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2180         if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
2181                 speed_capa |= RTE_ETH_LINK_SPEED_10G;
2182         if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
2183                 speed_capa |= RTE_ETH_LINK_SPEED_25G;
2184         if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
2185                 speed_capa |= RTE_ETH_LINK_SPEED_40G;
2186         if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
2187                 speed_capa |= RTE_ETH_LINK_SPEED_50G;
2188         if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
2189                 speed_capa |= RTE_ETH_LINK_SPEED_100G;
2190         if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
2191                 speed_capa |= RTE_ETH_LINK_SPEED_200G;
2192
2193         return speed_capa;
2194 }
2195
2196 uint32_t
2197 hns3_get_speed_capa(struct hns3_hw *hw)
2198 {
2199         struct hns3_mac *mac = &hw->mac;
2200         uint32_t speed_capa;
2201
2202         if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
2203                 speed_capa =
2204                         hns3_get_copper_port_speed_capa(mac->supported_speed);
2205         else
2206                 speed_capa =
2207                         hns3_get_firber_port_speed_capa(mac->supported_speed);
2208
2209         if (mac->support_autoneg == 0)
2210                 speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
2211
2212         return speed_capa;
2213 }
2214
2215 static int
2216 hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
2217 {
2218         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2219         int ret;
2220
2221         (void)hns3_update_link_status(hw);
2222
2223         ret = hns3_update_link_info(eth_dev);
2224         if (ret)
2225                 hw->mac.link_status = RTE_ETH_LINK_DOWN;
2226
2227         return ret;
2228 }
2229
2230 static void
2231 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
2232                       struct rte_eth_link *new_link)
2233 {
2234         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2235         struct hns3_mac *mac = &hw->mac;
2236
2237         switch (mac->link_speed) {
2238         case RTE_ETH_SPEED_NUM_10M:
2239         case RTE_ETH_SPEED_NUM_100M:
2240         case RTE_ETH_SPEED_NUM_1G:
2241         case RTE_ETH_SPEED_NUM_10G:
2242         case RTE_ETH_SPEED_NUM_25G:
2243         case RTE_ETH_SPEED_NUM_40G:
2244         case RTE_ETH_SPEED_NUM_50G:
2245         case RTE_ETH_SPEED_NUM_100G:
2246         case RTE_ETH_SPEED_NUM_200G:
2247                 if (mac->link_status)
2248                         new_link->link_speed = mac->link_speed;
2249                 break;
2250         default:
2251                 if (mac->link_status)
2252                         new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
2253                 break;
2254         }
2255
2256         if (!mac->link_status)
2257                 new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
2258
2259         new_link->link_duplex = mac->link_duplex;
2260         new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
2261         new_link->link_autoneg = mac->link_autoneg;
2262 }
2263
/*
 * .link_update ops: report the current link state to ethdev.
 *
 * A stopped port is reported down without touching hardware. Otherwise
 * the link info is refreshed; with wait_to_complete set, polling repeats
 * every HNS3_LINK_CHECK_INTERVAL ms until the link comes up or the retry
 * budget is exhausted.
 */
static int
hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
#define HNS3_LINK_CHECK_INTERVAL 100  /* 100ms */
#define HNS3_MAX_LINK_CHECK_TIMES 20  /* 2s (20 * 100ms) in total */

        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
        struct hns3_mac *mac = &hw->mac;
        struct rte_eth_link new_link;
        int ret;

        /* When port is stopped, report link down. */
        if (eth_dev->data->dev_started == 0) {
                new_link.link_autoneg = mac->link_autoneg;
                new_link.link_duplex = mac->link_duplex;
                new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
                new_link.link_status = RTE_ETH_LINK_DOWN;
                goto out;
        }

        do {
                ret = hns3_update_port_link_info(eth_dev);
                if (ret) {
                        hns3_err(hw, "failed to get port link info, ret = %d.",
                                 ret);
                        break;
                }

                /* Stop polling once the link is up or no wait was requested. */
                if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
                        break;

                rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
        } while (retry_cnt--);

        memset(&new_link, 0, sizeof(new_link));
        hns3_setup_linkstatus(eth_dev, &new_link);

out:
        return rte_eth_linkstatus_set(eth_dev, &new_link);
}
2305
2306 static int
2307 hns3_dev_set_link_up(struct rte_eth_dev *dev)
2308 {
2309         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2310         int ret;
2311
2312         /*
2313          * The "tx_pkt_burst" will be restored. But the secondary process does
2314          * not support the mechanism for notifying the primary process.
2315          */
2316         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2317                 hns3_err(hw, "secondary process does not support to set link up.");
2318                 return -ENOTSUP;
2319         }
2320
2321         /*
2322          * If device isn't started Rx/Tx function is still disabled, setting
2323          * link up is not allowed. But it is probably better to return success
2324          * to reduce the impact on the upper layer.
2325          */
2326         if (hw->adapter_state != HNS3_NIC_STARTED) {
2327                 hns3_info(hw, "device isn't started, can't set link up.");
2328                 return 0;
2329         }
2330
2331         if (!hw->set_link_down)
2332                 return 0;
2333
2334         rte_spinlock_lock(&hw->lock);
2335         ret = hns3_cfg_mac_mode(hw, true);
2336         if (ret) {
2337                 rte_spinlock_unlock(&hw->lock);
2338                 hns3_err(hw, "failed to set link up, ret = %d", ret);
2339                 return ret;
2340         }
2341
2342         hw->set_link_down = false;
2343         hns3_start_tx_datapath(dev);
2344         rte_spinlock_unlock(&hw->lock);
2345
2346         return 0;
2347 }
2348
2349 static int
2350 hns3_dev_set_link_down(struct rte_eth_dev *dev)
2351 {
2352         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2353         int ret;
2354
2355         /*
2356          * The "tx_pkt_burst" will be set to dummy function. But the secondary
2357          * process does not support the mechanism for notifying the primary
2358          * process.
2359          */
2360         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2361                 hns3_err(hw, "secondary process does not support to set link down.");
2362                 return -ENOTSUP;
2363         }
2364
2365         /*
2366          * If device isn't started or the API has been called, link status is
2367          * down, return success.
2368          */
2369         if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
2370                 return 0;
2371
2372         rte_spinlock_lock(&hw->lock);
2373         hns3_stop_tx_datapath(dev);
2374         ret = hns3_cfg_mac_mode(hw, false);
2375         if (ret) {
2376                 hns3_start_tx_datapath(dev);
2377                 rte_spinlock_unlock(&hw->lock);
2378                 hns3_err(hw, "failed to set link down, ret = %d", ret);
2379                 return ret;
2380         }
2381
2382         hw->set_link_down = true;
2383         rte_spinlock_unlock(&hw->lock);
2384
2385         return 0;
2386 }
2387
2388 static int
2389 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2390 {
2391         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2392         struct hns3_pf *pf = &hns->pf;
2393
2394         if (!(status->pf_state & HNS3_PF_STATE_DONE))
2395                 return -EINVAL;
2396
2397         pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2398
2399         return 0;
2400 }
2401
2402 static int
2403 hns3_query_function_status(struct hns3_hw *hw)
2404 {
2405 #define HNS3_QUERY_MAX_CNT              10
2406 #define HNS3_QUERY_SLEEP_MSCOEND        1
2407         struct hns3_func_status_cmd *req;
2408         struct hns3_cmd_desc desc;
2409         int timeout = 0;
2410         int ret;
2411
2412         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2413         req = (struct hns3_func_status_cmd *)desc.data;
2414
2415         do {
2416                 ret = hns3_cmd_send(hw, &desc, 1);
2417                 if (ret) {
2418                         PMD_INIT_LOG(ERR, "query function status failed %d",
2419                                      ret);
2420                         return ret;
2421                 }
2422
2423                 /* Check pf reset is done */
2424                 if (req->pf_state)
2425                         break;
2426
2427                 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);
2428         } while (timeout++ < HNS3_QUERY_MAX_CNT);
2429
2430         return hns3_parse_func_status(hw, req);
2431 }
2432
2433 static int
2434 hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
2435 {
2436         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2437         struct hns3_pf *pf = &hns->pf;
2438
2439         if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
2440                 /*
2441                  * The total_tqps_num obtained from firmware is maximum tqp
2442                  * numbers of this port, which should be used for PF and VFs.
2443                  * There is no need for pf to have so many tqp numbers in
2444                  * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
2445                  * coming from config file, is assigned to maximum queue number
2446                  * for the PF of this port by user. So users can modify the
2447                  * maximum queue number of PF according to their own application
2448                  * scenarios, which is more flexible to use. In addition, many
2449                  * memories can be saved due to allocating queue statistics
2450                  * room according to the actual number of queues required. The
2451                  * maximum queue number of PF for network engine with
2452                  * revision_id greater than 0x30 is assigned by config file.
2453                  */
2454                 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
2455                         hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
2456                                  "must be greater than 0.",
2457                                  RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
2458                         return -EINVAL;
2459                 }
2460
2461                 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
2462                                        hw->total_tqps_num);
2463         } else {
2464                 /*
2465                  * Due to the limitation on the number of PF interrupts
2466                  * available, the maximum queue number assigned to PF on
2467                  * the network engine with revision_id 0x21 is 64.
2468                  */
2469                 hw->tqps_num = RTE_MIN(hw->total_tqps_num,
2470                                        HNS3_MAX_TQP_NUM_HIP08_PF);
2471         }
2472
2473         return 0;
2474 }
2475
2476 static int
2477 hns3_query_pf_resource(struct hns3_hw *hw)
2478 {
2479         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2480         struct hns3_pf *pf = &hns->pf;
2481         struct hns3_pf_res_cmd *req;
2482         struct hns3_cmd_desc desc;
2483         int ret;
2484
2485         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2486         ret = hns3_cmd_send(hw, &desc, 1);
2487         if (ret) {
2488                 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2489                 return ret;
2490         }
2491
2492         req = (struct hns3_pf_res_cmd *)desc.data;
2493         hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
2494                              rte_le_to_cpu_16(req->ext_tqp_num);
2495         ret = hns3_get_pf_max_tqp_num(hw);
2496         if (ret)
2497                 return ret;
2498
2499         pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
2500         pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
2501
2502         if (req->tx_buf_size)
2503                 pf->tx_buf_size =
2504                     rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2505         else
2506                 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2507
2508         pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2509
2510         if (req->dv_buf_size)
2511                 pf->dv_buf_size =
2512                     rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2513         else
2514                 pf->dv_buf_size = HNS3_DEFAULT_DV;
2515
2516         pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2517
2518         hw->num_msi =
2519                 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
2520                                HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
2521
2522         return 0;
2523 }
2524
/*
 * Decode the two-descriptor board-configuration response from firmware
 * into @cfg: TC/queue counts, PHY address, media type, RX buffer length,
 * MAC address, default speed, RSS sizes, NUMA map, speed ability and
 * UMV (unicast MAC VLAN) table space.
 */
static void
hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
{
	struct hns3_cfg_param_cmd *req;
	uint64_t mac_addr_tmp_high;
	uint8_t ext_rss_size_max;
	uint64_t mac_addr_tmp;
	uint32_t i;

	req = (struct hns3_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
				     HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
					   HNS3_CFG_TQP_DESC_N_M,
					   HNS3_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
				       HNS3_CFG_PHY_ADDR_M,
				       HNS3_CFG_PHY_ADDR_S);
	cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					 HNS3_CFG_MEDIA_TP_M,
					 HNS3_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					 HNS3_CFG_RX_BUF_LEN_M,
					 HNS3_CFG_RX_BUF_LEN_S);
	/* get mac address */
	mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
	mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					   HNS3_CFG_MAC_ADDR_H_M,
					   HNS3_CFG_MAC_ADDR_H_S);

	/*
	 * Merge the high 16 MAC bits above the low 32.  The split
	 * (<< 31 then << 1) is equivalent to << 32 on this uint64_t;
	 * the two-step form keeps each shift count below 32.
	 */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					    HNS3_CFG_DEFAULT_SPEED_M,
					    HNS3_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
					   HNS3_CFG_RSS_SIZE_M,
					   HNS3_CFG_RSS_SIZE_S);

	/* Unpack the 48-bit MAC value byte-by-byte, little-endian first. */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	/* The remaining fields live in the second descriptor. */
	req = (struct hns3_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);

	cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					    HNS3_CFG_SPEED_ABILITY_M,
					    HNS3_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
					HNS3_CFG_UMV_TBL_SPACE_M,
					HNS3_CFG_UMV_TBL_SPACE_S);
	/* Zero means firmware did not report it; use the per-PF default. */
	if (!cfg->umv_space)
		cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;

	ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
					       HNS3_CFG_EXT_RSS_SIZE_M,
					       HNS3_CFG_EXT_RSS_SIZE_S);
	/*
	 * Field ext_rss_size_max obtained from firmware will be more flexible
	 * for future changes and expansions, which is an exponent of 2, instead
	 * of reading out directly. If this field is not zero, hns3 PF PMD
	 * uses it as rss_size_max under one TC. Device, whose revision
	 * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the
	 * maximum number of queues supported under a TC through this field.
	 */
	if (ext_rss_size_max)
		cfg->rss_size_max = 1U << ext_rss_size_max;
}
2596
2597 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash
2598  * @hw: pointer to struct hns3_hw
2599  * @hcfg: the config structure to be getted
2600  */
2601 static int
2602 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2603 {
2604         struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2605         struct hns3_cfg_param_cmd *req;
2606         uint32_t offset;
2607         uint32_t i;
2608         int ret;
2609
2610         for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2611                 offset = 0;
2612                 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2613                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2614                                           true);
2615                 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2616                                i * HNS3_CFG_RD_LEN_BYTES);
2617                 /* Len should be divided by 4 when send to hardware */
2618                 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2619                                HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2620                 req->offset = rte_cpu_to_le_32(offset);
2621         }
2622
2623         ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2624         if (ret) {
2625                 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2626                 return ret;
2627         }
2628
2629         hns3_parse_cfg(hcfg, desc);
2630
2631         return 0;
2632 }
2633
2634 static int
2635 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2636 {
2637         switch (speed_cmd) {
2638         case HNS3_CFG_SPEED_10M:
2639                 *speed = RTE_ETH_SPEED_NUM_10M;
2640                 break;
2641         case HNS3_CFG_SPEED_100M:
2642                 *speed = RTE_ETH_SPEED_NUM_100M;
2643                 break;
2644         case HNS3_CFG_SPEED_1G:
2645                 *speed = RTE_ETH_SPEED_NUM_1G;
2646                 break;
2647         case HNS3_CFG_SPEED_10G:
2648                 *speed = RTE_ETH_SPEED_NUM_10G;
2649                 break;
2650         case HNS3_CFG_SPEED_25G:
2651                 *speed = RTE_ETH_SPEED_NUM_25G;
2652                 break;
2653         case HNS3_CFG_SPEED_40G:
2654                 *speed = RTE_ETH_SPEED_NUM_40G;
2655                 break;
2656         case HNS3_CFG_SPEED_50G:
2657                 *speed = RTE_ETH_SPEED_NUM_50G;
2658                 break;
2659         case HNS3_CFG_SPEED_100G:
2660                 *speed = RTE_ETH_SPEED_NUM_100G;
2661                 break;
2662         case HNS3_CFG_SPEED_200G:
2663                 *speed = RTE_ETH_SPEED_NUM_200G;
2664                 break;
2665         default:
2666                 return -EINVAL;
2667         }
2668
2669         return 0;
2670 }
2671
/*
 * Apply built-in device specifications for hardware that predates the
 * firmware "query dev specs" command (revision < HIP09-A): BD-per-packet
 * limit for non-TSO, RSS table/key sizes, max TM rate and the QL
 * interrupt-coalescing capability (disabled here).
 */
static void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
}
2681
/*
 * Copy the device specifications reported by firmware (first response
 * descriptor) into hw, converting multi-byte fields from little endian.
 * Counterpart of hns3_set_default_dev_specifications() for newer HW.
 */
static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_dev_specs_0_cmd *req0;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
}
2695
2696 static int
2697 hns3_check_dev_specifications(struct hns3_hw *hw)
2698 {
2699         if (hw->rss_ind_tbl_size == 0 ||
2700             hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
2701                 hns3_err(hw, "the size of hash lookup table configured (%u)"
2702                               " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
2703                               HNS3_RSS_IND_TBL_SIZE_MAX);
2704                 return -EINVAL;
2705         }
2706
2707         return 0;
2708 }
2709
2710 static int
2711 hns3_query_dev_specifications(struct hns3_hw *hw)
2712 {
2713         struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
2714         int ret;
2715         int i;
2716
2717         for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2718                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
2719                                           true);
2720                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2721         }
2722         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
2723
2724         ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
2725         if (ret)
2726                 return ret;
2727
2728         hns3_parse_dev_specifications(hw, desc);
2729
2730         return hns3_check_dev_specifications(hw);
2731 }
2732
/*
 * Determine hardware capabilities and select per-generation operating
 * modes.  DCB support is keyed off the PCI device ID; everything else
 * branches on the PCI revision ID: pre-HIP09-A parts use built-in
 * defaults and software-assisted modes, HIP09-A and later query
 * specifications from firmware and use the hardware-assisted modes.
 *
 * Returns 0 on success, -EIO if the PCI revision cannot be read, or an
 * error from the dev-specs query.
 */
static int
hns3_get_capability(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct rte_pci_device *pci_dev;
	struct hns3_pf *pf = &hns->pf;
	struct rte_eth_dev *eth_dev;
	uint16_t device_id;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	device_id = pci_dev->id.device_id;

	/* Only the RDMA-capable device IDs support DCB. */
	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
	    device_id == HNS3_DEV_ID_50GE_RDMA ||
	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
	    device_id == HNS3_DEV_ID_200G_RDMA)
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);

	/* Get PCI revision id */
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	/* rte_pci_read_config returns the number of bytes read on success. */
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
			     ret);
		return -EIO;
	}
	hw->revision = revision;

	if (revision < PCI_REVISION_ID_HIP09_A) {
		/* Older silicon: defaults plus software-assisted modes. */
		hns3_set_default_dev_specifications(hw);
		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
		hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
		pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
		hw->rss_info.ipv6_sctp_offload_supported = false;
		hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
		pf->support_multi_tc_pause = false;
		return 0;
	}

	/* HIP09-A and newer: specifications come from firmware. */
	ret = hns3_query_dev_specifications(hw);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "failed to query dev specifications, ret = %d",
			     ret);
		return ret;
	}

	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
	hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
	pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
	hw->rss_info.ipv6_sctp_offload_supported = true;
	hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
	pf->support_multi_tc_pause = true;

	return 0;
}
2800
2801 static int
2802 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
2803 {
2804         int ret;
2805
2806         switch (media_type) {
2807         case HNS3_MEDIA_TYPE_COPPER:
2808                 if (!hns3_dev_get_support(hw, COPPER)) {
2809                         PMD_INIT_LOG(ERR,
2810                                      "Media type is copper, not supported.");
2811                         ret = -EOPNOTSUPP;
2812                 } else {
2813                         ret = 0;
2814                 }
2815                 break;
2816         case HNS3_MEDIA_TYPE_FIBER:
2817                 ret = 0;
2818                 break;
2819         case HNS3_MEDIA_TYPE_BACKPLANE:
2820                 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
2821                 ret = -EOPNOTSUPP;
2822                 break;
2823         default:
2824                 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
2825                 ret = -EINVAL;
2826                 break;
2827         }
2828
2829         return ret;
2830 }
2831
/*
 * Read the board configuration from flash and derive the driver's view
 * of MAC, queue, speed and TC parameters.  Out-of-range TC counts from
 * flash are clamped to 1, and DCB-incapable devices are forced to a
 * single TC with PFC disabled.
 *
 * Returns 0 on success or a negative errno from the config read, media
 * type check or speed parsing.
 */
static int
hns3_get_board_configuration(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cfg cfg;
	int ret;

	ret = hns3_get_board_cfg(hw, &cfg);
	if (ret) {
		PMD_INIT_LOG(ERR, "get board config failed %d", ret);
		return ret;
	}

	ret = hns3_check_media_type(hw, cfg.media_type);
	if (ret)
		return ret;

	hw->mac.media_type = cfg.media_type;
	hw->rss_size_max = cfg.rss_size_max;
	hw->rss_dis_flag = false;
	memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
	hw->mac.phy_addr = cfg.phy_addr;
	/* RX and TX rings share the configured descriptor count. */
	hw->num_tx_desc = cfg.tqp_desc_num;
	hw->num_rx_desc = cfg.tqp_desc_num;
	hw->dcb_info.num_pg = 1;
	hw->dcb_info.hw_pfc_map = 0;

	ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
	if (ret) {
		PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
			     cfg.default_speed, ret);
		return ret;
	}

	/* Clamp a bogus TC count from flash to a single TC. */
	pf->tc_max = cfg.tc_num;
	if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
		PMD_INIT_LOG(WARNING,
			     "Get TC num(%u) from flash, set TC num to 1",
			     pf->tc_max);
		pf->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hns3_dev_get_support(hw, DCB)) {
		pf->tc_max = 1;
		pf->pfc_max = 0;
	} else
		pf->pfc_max = pf->tc_max;

	/* Start with one active TC; RSS size is split evenly per TC. */
	hw->dcb_info.num_tc = 1;
	hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
				     hw->tqps_num / hw->dcb_info.num_tc);
	hns3_set_bit(hw->hw_tc_map, 0, 1);
	pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;

	pf->wanted_umv_size = cfg.umv_space;

	return ret;
}
2892
/*
 * Top-level configuration sequence during PF init: function status,
 * device capability, PF resources, board configuration, then FEC info.
 * Each step depends on the previous ones, so order matters.  A FEC
 * query failure is logged and returned but does not abort earlier
 * state already set.
 */
static int
hns3_get_configuration(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_query_function_status(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
		return ret;
	}

	/* Get device capability */
	ret = hns3_get_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
		return ret;
	}

	/* Get pf resource */
	ret = hns3_query_pf_resource(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
		return ret;
	}

	ret = hns3_get_board_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
		return ret;
	}

	ret = hns3_query_dev_fec_info(hw);
	if (ret)
		PMD_INIT_LOG(ERR,
			     "failed to query FEC information, ret = %d", ret);

	return ret;
}
2931
2932 static int
2933 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
2934                       uint16_t tqp_vid, bool is_pf)
2935 {
2936         struct hns3_tqp_map_cmd *req;
2937         struct hns3_cmd_desc desc;
2938         int ret;
2939
2940         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
2941
2942         req = (struct hns3_tqp_map_cmd *)desc.data;
2943         req->tqp_id = rte_cpu_to_le_16(tqp_pid);
2944         req->tqp_vf = func_id;
2945         req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
2946         if (!is_pf)
2947                 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
2948         req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
2949
2950         ret = hns3_cmd_send(hw, &desc, 1);
2951         if (ret)
2952                 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
2953
2954         return ret;
2955 }
2956
2957 static int
2958 hns3_map_tqp(struct hns3_hw *hw)
2959 {
2960         int ret;
2961         int i;
2962
2963         /*
2964          * In current version, VF is not supported when PF is driven by DPDK
2965          * driver, so we assign total tqps_num tqps allocated to this port
2966          * to PF.
2967          */
2968         for (i = 0; i < hw->total_tqps_num; i++) {
2969                 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
2970                 if (ret)
2971                         return ret;
2972         }
2973
2974         return 0;
2975 }
2976
2977 static int
2978 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
2979 {
2980         struct hns3_config_mac_speed_dup_cmd *req;
2981         struct hns3_cmd_desc desc;
2982         int ret;
2983
2984         req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
2985
2986         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
2987
2988         hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
2989
2990         switch (speed) {
2991         case RTE_ETH_SPEED_NUM_10M:
2992                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2993                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
2994                 break;
2995         case RTE_ETH_SPEED_NUM_100M:
2996                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2997                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
2998                 break;
2999         case RTE_ETH_SPEED_NUM_1G:
3000                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3001                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
3002                 break;
3003         case RTE_ETH_SPEED_NUM_10G:
3004                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3005                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
3006                 break;
3007         case RTE_ETH_SPEED_NUM_25G:
3008                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3009                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
3010                 break;
3011         case RTE_ETH_SPEED_NUM_40G:
3012                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3013                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
3014                 break;
3015         case RTE_ETH_SPEED_NUM_50G:
3016                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3017                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
3018                 break;
3019         case RTE_ETH_SPEED_NUM_100G:
3020                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3021                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
3022                 break;
3023         case RTE_ETH_SPEED_NUM_200G:
3024                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3025                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
3026                 break;
3027         default:
3028                 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
3029                 return -EINVAL;
3030         }
3031
3032         hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
3033
3034         ret = hns3_cmd_send(hw, &desc, 1);
3035         if (ret)
3036                 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
3037
3038         return ret;
3039 }
3040
3041 static int
3042 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3043 {
3044         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3045         struct hns3_pf *pf = &hns->pf;
3046         struct hns3_priv_buf *priv;
3047         uint32_t i, total_size;
3048
3049         total_size = pf->pkt_buf_size;
3050
3051         /* alloc tx buffer for all enabled tc */
3052         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3053                 priv = &buf_alloc->priv_buf[i];
3054
3055                 if (hw->hw_tc_map & BIT(i)) {
3056                         if (total_size < pf->tx_buf_size)
3057                                 return -ENOMEM;
3058
3059                         priv->tx_buf_size = pf->tx_buf_size;
3060                 } else
3061                         priv->tx_buf_size = 0;
3062
3063                 total_size -= priv->tx_buf_size;
3064         }
3065
3066         return 0;
3067 }
3068
3069 static int
3070 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3071 {
3072 /* TX buffer size is unit by 128 byte */
3073 #define HNS3_BUF_SIZE_UNIT_SHIFT        7
3074 #define HNS3_BUF_SIZE_UPDATE_EN_MSK     BIT(15)
3075         struct hns3_tx_buff_alloc_cmd *req;
3076         struct hns3_cmd_desc desc;
3077         uint32_t buf_size;
3078         uint32_t i;
3079         int ret;
3080
3081         req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
3082
3083         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
3084         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3085                 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
3086
3087                 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
3088                 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
3089                                                 HNS3_BUF_SIZE_UPDATE_EN_MSK);
3090         }
3091
3092         ret = hns3_cmd_send(hw, &desc, 1);
3093         if (ret)
3094                 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
3095
3096         return ret;
3097 }
3098
3099 static int
3100 hns3_get_tc_num(struct hns3_hw *hw)
3101 {
3102         int cnt = 0;
3103         uint8_t i;
3104
3105         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3106                 if (hw->hw_tc_map & BIT(i))
3107                         cnt++;
3108         return cnt;
3109 }
3110
3111 static uint32_t
3112 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3113 {
3114         struct hns3_priv_buf *priv;
3115         uint32_t rx_priv = 0;
3116         int i;
3117
3118         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3119                 priv = &buf_alloc->priv_buf[i];
3120                 if (priv->enable)
3121                         rx_priv += priv->buf_size;
3122         }
3123         return rx_priv;
3124 }
3125
3126 static uint32_t
3127 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3128 {
3129         uint32_t total_tx_size = 0;
3130         uint32_t i;
3131
3132         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3133                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
3134
3135         return total_tx_size;
3136 }
3137
3138 /* Get the number of pfc enabled TCs, which have private buffer */
3139 static int
3140 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3141 {
3142         struct hns3_priv_buf *priv;
3143         int cnt = 0;
3144         uint8_t i;
3145
3146         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3147                 priv = &buf_alloc->priv_buf[i];
3148                 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3149                         cnt++;
3150         }
3151
3152         return cnt;
3153 }
3154
3155 /* Get the number of pfc disabled TCs, which have private buffer */
3156 static int
3157 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
3158                          struct hns3_pkt_buf_alloc *buf_alloc)
3159 {
3160         struct hns3_priv_buf *priv;
3161         int cnt = 0;
3162         uint8_t i;
3163
3164         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3165                 priv = &buf_alloc->priv_buf[i];
3166                 if (hw->hw_tc_map & BIT(i) &&
3167                     !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3168                         cnt++;
3169         }
3170
3171         return cnt;
3172 }
3173
/*
 * Check whether @rx_all bytes of RX packet buffer can hold all private
 * buffers plus the minimum shared buffer, and if so fill in the shared
 * buffer size plus its self and per-TC high/low flow-control thresholds.
 *
 * Returns true and populates buf_alloc->s_buf on success, false if the
 * space is insufficient under this scheme.
 */
static bool
hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
		  uint32_t rx_all)
{
	uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t shared_buf, aligned_mps;
	uint32_t rx_priv;
	uint8_t tc_num;
	uint8_t i;

	tc_num = hns3_get_tc_num(hw);
	/* MPS (max packet size) rounded up to the buffer allocation unit. */
	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

	/* Minimum shared buffer: DCB needs headroom for several MPS plus
	 * the DV (delay) buffer; non-DCB needs one MPS plus extra slack.
	 */
	if (hns3_dev_get_support(hw, DCB))
		shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
					pf->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
					+ pf->dv_buf_size;

	/* One MPS per active TC plus one spare. */
	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
			     HNS3_BUF_SIZE_UNIT);

	rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	/* Everything left after private buffers becomes the shared buffer. */
	shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hns3_dev_get_support(hw, DCB)) {
		buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HNS3_BUF_DIV_BY,
				  HNS3_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high =
			aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	/* Per-TC thresholds within the shared buffer. */
	if (hns3_dev_get_support(hw, DCB)) {
		hi_thrd = shared_buf - pf->dv_buf_size;

		/* With few TCs, reserve a fraction of the headroom. */
		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
				  BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
3242
3243 static bool
3244 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
3245                      struct hns3_pkt_buf_alloc *buf_alloc)
3246 {
3247         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3248         struct hns3_pf *pf = &hns->pf;
3249         struct hns3_priv_buf *priv;
3250         uint32_t aligned_mps;
3251         uint32_t rx_all;
3252         uint8_t i;
3253
3254         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3255         aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3256
3257         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3258                 priv = &buf_alloc->priv_buf[i];
3259
3260                 priv->enable = 0;
3261                 priv->wl.low = 0;
3262                 priv->wl.high = 0;
3263                 priv->buf_size = 0;
3264
3265                 if (!(hw->hw_tc_map & BIT(i)))
3266                         continue;
3267
3268                 priv->enable = 1;
3269                 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
3270                         priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
3271                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
3272                                                 HNS3_BUF_SIZE_UNIT);
3273                 } else {
3274                         priv->wl.low = 0;
3275                         priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
3276                                         aligned_mps;
3277                 }
3278
3279                 priv->buf_size = priv->wl.high + pf->dv_buf_size;
3280         }
3281
3282         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3283 }
3284
3285 static bool
3286 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
3287                              struct hns3_pkt_buf_alloc *buf_alloc)
3288 {
3289         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3290         struct hns3_pf *pf = &hns->pf;
3291         struct hns3_priv_buf *priv;
3292         int no_pfc_priv_num;
3293         uint32_t rx_all;
3294         uint8_t mask;
3295         int i;
3296
3297         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3298         no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
3299
3300         /* let the last to be cleared first */
3301         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3302                 priv = &buf_alloc->priv_buf[i];
3303                 mask = BIT((uint8_t)i);
3304                 if (hw->hw_tc_map & mask &&
3305                     !(hw->dcb_info.hw_pfc_map & mask)) {
3306                         /* Clear the no pfc TC private buffer */
3307                         priv->wl.low = 0;
3308                         priv->wl.high = 0;
3309                         priv->buf_size = 0;
3310                         priv->enable = 0;
3311                         no_pfc_priv_num--;
3312                 }
3313
3314                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3315                     no_pfc_priv_num == 0)
3316                         break;
3317         }
3318
3319         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3320 }
3321
3322 static bool
3323 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
3324                            struct hns3_pkt_buf_alloc *buf_alloc)
3325 {
3326         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3327         struct hns3_pf *pf = &hns->pf;
3328         struct hns3_priv_buf *priv;
3329         uint32_t rx_all;
3330         int pfc_priv_num;
3331         uint8_t mask;
3332         int i;
3333
3334         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3335         pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
3336
3337         /* let the last to be cleared first */
3338         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3339                 priv = &buf_alloc->priv_buf[i];
3340                 mask = BIT((uint8_t)i);
3341                 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
3342                         /* Reduce the number of pfc TC with private buffer */
3343                         priv->wl.low = 0;
3344                         priv->enable = 0;
3345                         priv->wl.high = 0;
3346                         priv->buf_size = 0;
3347                         pfc_priv_num--;
3348                 }
3349                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3350                     pfc_priv_num == 0)
3351                         break;
3352         }
3353
3354         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3355 }
3356
/*
 * Try to satisfy the rx buffer demand with per-TC private buffers only,
 * leaving no shared buffer. Succeeds only when the evenly-split share per
 * TC is at least min_rx_priv.
 *
 * @param hw pointer to struct hns3_hw
 * @param buf_alloc buffer calculation data, filled on success
 * @return true if the private-only layout fits, false otherwise
 */
static bool
hns3_only_alloc_priv_buff(struct hns3_hw *hw,
			  struct hns3_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t tc_num = hns3_get_tc_num(hw);
	uint32_t half_mps = pf->mps >> 1;
	struct hns3_priv_buf *priv;
	uint32_t min_rx_priv;
	uint32_t rx_priv;
	uint8_t i;

	/* Split what is left after tx buffers evenly among the TCs. */
	rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
	if (tc_num)
		rx_priv = rx_priv / tc_num;

	/* With few TCs only use a reserve percentage of the share. */
	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	/*
	 * Minimum value of private buffer in rx direction (min_rx_priv) is
	 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
	 * buffer if rx_priv is greater than min_rx_priv.
	 */
	min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
	rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hw->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		/* Keep pf->dv_buf_size headroom above the high waterline. */
		priv->wl.high = rx_priv - pf->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	/* No shared buffer in this scheme. */
	buf_alloc->s_buf.buf_size = 0;

	return true;
}
3412
/*
 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hw: pointer to struct hns3_hw
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 *
 * Several layouts are tried in decreasing order of preference:
 * private-buffer-only, private buffers with a shared buffer (maximal then
 * minimal waterlines), then progressively dropping private buffers of
 * PFC-disabled and finally PFC-enabled TCs.
 */
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hns3_dev_get_support(hw, DCB)) {
		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
		struct hns3_pf *pf = &hns->pf;
		uint32_t rx_all = pf->pkt_buf_size;

		rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
		if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/*
	 * Try to allocate privated packet buffer for all TCs without share
	 * buffer.
	 */
	if (hns3_only_alloc_priv_buff(hw, buf_alloc))
		return 0;

	/*
	 * Try to allocate privated packet buffer for all TCs with share
	 * buffer.
	 */
	if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
		return 0;

	/*
	 * For different application scenes, the enabled port number, TC number
	 * and no_drop TC number are different. In order to obtain the better
	 * performance, software could allocate the buffer size and configure
	 * the waterline by trying to decrease the private buffer size according
	 * to the order, namely, waterline of valid tc, pfc disabled tc, pfc
	 * enabled tc.
	 */
	if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
		return 0;

	if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
		return 0;

	if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
		return 0;

	/* No layout fits the available packet buffer. */
	return -ENOMEM;
}
3468
3469 static int
3470 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3471 {
3472         struct hns3_rx_priv_buff_cmd *req;
3473         struct hns3_cmd_desc desc;
3474         uint32_t buf_size;
3475         int ret;
3476         int i;
3477
3478         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
3479         req = (struct hns3_rx_priv_buff_cmd *)desc.data;
3480
3481         /* Alloc private buffer TCs */
3482         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3483                 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
3484
3485                 req->buf_num[i] =
3486                         rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
3487                 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
3488         }
3489
3490         buf_size = buf_alloc->s_buf.buf_size;
3491         req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
3492                                            (1 << HNS3_TC0_PRI_BUF_EN_B));
3493
3494         ret = hns3_cmd_send(hw, &desc, 1);
3495         if (ret)
3496                 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
3497
3498         return ret;
3499 }
3500
/*
 * Program each TC's private rx buffer high/low waterlines into hardware.
 * All TCs are covered by HNS3_RX_PRIV_WL_ALLOC_DESC_NUM chained command
 * descriptors, HNS3_TC_NUM_ONE_DESC TCs per descriptor.
 *
 * @return 0 on success, or the error from hns3_cmd_send
 */
static int
hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
{
#define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
	struct hns3_rx_priv_wl_buf *req;
	struct hns3_priv_buf *priv;
	struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
	int i, j;
	int ret;

	for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
					  false);
		req = (struct hns3_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);

		for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
			uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			/*
			 * Waterlines are written in HNS3_BUF_UNIT_S units with
			 * the HNS3_RX_PRIV_EN_B bit set in each half-word.
			 */
			req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
							HNS3_BUF_UNIT_S);
			req->tc_wl[j].high |=
				rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
			req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
							HNS3_BUF_UNIT_S);
			req->tc_wl[j].low |=
				rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptor at one time */
	ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
	if (ret)
		PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
			     ret);
	return ret;
}
3544
/*
 * Program the shared buffer per-TC high/low thresholds into hardware.
 * All TCs are covered by HNS3_RX_COM_THRD_ALLOC_DESC_NUM chained command
 * descriptors, HNS3_TC_NUM_ONE_DESC TCs per descriptor.
 *
 * @return 0 on success, or the error from hns3_cmd_send
 */
static int
hns3_common_thrd_config(struct hns3_hw *hw,
			struct hns3_pkt_buf_alloc *buf_alloc)
{
#define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
	struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hns3_rx_com_thrd *req;
	struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
	struct hns3_tc_thrd *tc;
	int tc_idx;
	int i, j;
	int ret;

	for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
					  false);
		req = (struct hns3_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);

		for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
			tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
			tc = &s_buf->tc_thrd[tc_idx];

			/*
			 * Thresholds are written in HNS3_BUF_UNIT_S units with
			 * the HNS3_RX_PRIV_EN_B bit set in each half-word.
			 */
			req->com_thrd[j].high =
				rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
			req->com_thrd[j].high |=
				 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
			req->com_thrd[j].low |=
				 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
	if (ret)
		PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);

	return ret;
}
3591
3592 static int
3593 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3594 {
3595         struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3596         struct hns3_rx_com_wl *req;
3597         struct hns3_cmd_desc desc;
3598         int ret;
3599
3600         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3601
3602         req = (struct hns3_rx_com_wl *)desc.data;
3603         req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3604         req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3605
3606         req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3607         req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3608
3609         ret = hns3_cmd_send(hw, &desc, 1);
3610         if (ret)
3611                 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3612
3613         return ret;
3614 }
3615
/*
 * Calculate and program the whole tx/rx packet buffer layout: tx buffer
 * per TC, rx private buffers, and, when DCB is supported, the private
 * waterlines and common thresholds, finishing with the common waterline.
 *
 * @param hw pointer to struct hns3_hw
 * @return 0 on success, error code of the first failing step otherwise
 */
int
hns3_buffer_alloc(struct hns3_hw *hw)
{
	struct hns3_pkt_buf_alloc pkt_buf;
	int ret;

	memset(&pkt_buf, 0, sizeof(pkt_buf));
	ret = hns3_tx_buffer_calc(hw, &pkt_buf);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "could not calc tx buffer size for all TCs %d",
			     ret);
		return ret;
	}

	ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
	if (ret) {
		PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
		return ret;
	}

	ret = hns3_rx_buffer_calc(hw, &pkt_buf);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "could not calc rx priv buffer size for all TCs %d",
			     ret);
		return ret;
	}

	ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
	if (ret) {
		PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
		return ret;
	}

	/* Waterline/threshold programming only applies with DCB support. */
	if (hns3_dev_get_support(hw, DCB)) {
		ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "could not configure rx private waterline %d",
				     ret);
			return ret;
		}

		ret = hns3_common_thrd_config(hw, &pkt_buf);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "could not configure common threshold %d",
				     ret);
			return ret;
		}
	}

	ret = hns3_common_wl_config(hw, &pkt_buf);
	if (ret)
		PMD_INIT_LOG(ERR, "could not configure common waterline %d",
			     ret);

	return ret;
}
3676
3677 static int
3678 hns3_mac_init(struct hns3_hw *hw)
3679 {
3680         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3681         struct hns3_mac *mac = &hw->mac;
3682         struct hns3_pf *pf = &hns->pf;
3683         int ret;
3684
3685         pf->support_sfp_query = true;
3686         mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3687         ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3688         if (ret) {
3689                 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3690                 return ret;
3691         }
3692
3693         mac->link_status = RTE_ETH_LINK_DOWN;
3694
3695         return hns3_config_mtu(hw, pf->mps);
3696 }
3697
3698 static int
3699 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3700 {
3701 #define HNS3_ETHERTYPE_SUCCESS_ADD              0
3702 #define HNS3_ETHERTYPE_ALREADY_ADD              1
3703 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW         2
3704 #define HNS3_ETHERTYPE_KEY_CONFLICT             3
3705         int return_status;
3706
3707         if (cmdq_resp) {
3708                 PMD_INIT_LOG(ERR,
3709                              "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
3710                              cmdq_resp);
3711                 return -EIO;
3712         }
3713
3714         switch (resp_code) {
3715         case HNS3_ETHERTYPE_SUCCESS_ADD:
3716         case HNS3_ETHERTYPE_ALREADY_ADD:
3717                 return_status = 0;
3718                 break;
3719         case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3720                 PMD_INIT_LOG(ERR,
3721                              "add mac ethertype failed for manager table overflow.");
3722                 return_status = -EIO;
3723                 break;
3724         case HNS3_ETHERTYPE_KEY_CONFLICT:
3725                 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3726                 return_status = -EIO;
3727                 break;
3728         default:
3729                 PMD_INIT_LOG(ERR,
3730                              "add mac ethertype failed for undefined, code=%u.",
3731                              resp_code);
3732                 return_status = -EIO;
3733                 break;
3734         }
3735
3736         return return_status;
3737 }
3738
3739 static int
3740 hns3_add_mgr_tbl(struct hns3_hw *hw,
3741                  const struct hns3_mac_mgr_tbl_entry_cmd *req)
3742 {
3743         struct hns3_cmd_desc desc;
3744         uint8_t resp_code;
3745         uint16_t retval;
3746         int ret;
3747
3748         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3749         memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3750
3751         ret = hns3_cmd_send(hw, &desc, 1);
3752         if (ret) {
3753                 PMD_INIT_LOG(ERR,
3754                              "add mac ethertype failed for cmd_send, ret =%d.",
3755                              ret);
3756                 return ret;
3757         }
3758
3759         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3760         retval = rte_le_to_cpu_16(desc.retval);
3761
3762         return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3763 }
3764
/*
 * Fill the driver's static MAC manager table entries and report how many
 * were prepared.
 *
 * @param mgr_table output array of table entries to fill
 * @param table_item_num set to the number of entries filled in
 */
static void
hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
		     int *table_item_num)
{
	struct hns3_mac_mgr_tbl_entry_cmd *tbl;

	/*
	 * In current version, we add one item in management table as below:
	 * 0x0180C200000E -- LLDP MC address
	 */
	tbl = mgr_table;
	tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
	tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
	/* MAC bytes are laid out big-endian before the little-endian swap. */
	tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
	tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
	tbl->i_port_bitmap = 0x1;
	*table_item_num = 1;
}
3783
3784 static int
3785 hns3_init_mgr_tbl(struct hns3_hw *hw)
3786 {
3787 #define HNS_MAC_MGR_TBL_MAX_SIZE        16
3788         struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3789         int table_item_num;
3790         int ret;
3791         int i;
3792
3793         memset(mgr_table, 0, sizeof(mgr_table));
3794         hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3795         for (i = 0; i < table_item_num; i++) {
3796                 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3797                 if (ret) {
3798                         PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3799                                      ret);
3800                         return ret;
3801                 }
3802         }
3803
3804         return 0;
3805 }
3806
3807 static void
3808 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3809                         bool en_mc, bool en_bc, int vport_id)
3810 {
3811         if (!param)
3812                 return;
3813
3814         memset(param, 0, sizeof(struct hns3_promisc_param));
3815         if (en_uc)
3816                 param->enable = HNS3_PROMISC_EN_UC;
3817         if (en_mc)
3818                 param->enable |= HNS3_PROMISC_EN_MC;
3819         if (en_bc)
3820                 param->enable |= HNS3_PROMISC_EN_BC;
3821         param->vf_id = vport_id;
3822 }
3823
3824 static int
3825 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3826 {
3827         struct hns3_promisc_cfg_cmd *req;
3828         struct hns3_cmd_desc desc;
3829         int ret;
3830
3831         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3832
3833         req = (struct hns3_promisc_cfg_cmd *)desc.data;
3834         req->vf_id = param->vf_id;
3835         req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3836             HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3837
3838         ret = hns3_cmd_send(hw, &desc, 1);
3839         if (ret)
3840                 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
3841
3842         return ret;
3843 }
3844
3845 static int
3846 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3847 {
3848         struct hns3_promisc_param param;
3849         bool en_bc_pmc = true;
3850         uint8_t vf_id;
3851
3852         /*
3853          * In current version VF is not supported when PF is driven by DPDK
3854          * driver, just need to configure parameters for PF vport.
3855          */
3856         vf_id = HNS3_PF_FUNC_ID;
3857
3858         hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3859         return hns3_cmd_set_promisc_mode(hw, &param);
3860 }
3861
/*
 * Initialize promiscuous configuration at probe time: disable promiscuous
 * mode on the PF vport, then clear any residual promiscuous setting on
 * the VF vports.
 *
 * @return 0 on success, negative value on command failure
 */
static int
hns3_promisc_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_promisc_param param;
	uint16_t func_id;
	int ret;

	ret = hns3_set_promisc_mode(hw, false, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
		return ret;
	}

	/*
	 * In current version VFs are not supported when PF is driven by DPDK
	 * driver. After PF has been taken over by DPDK, the original VF will
	 * be invalid. So, there is a possibility of entry residues. It should
	 * clear VFs's promisc mode to avoid unnecessary bandwidth usage
	 * during init.
	 */
	for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
		hns3_promisc_param_init(&param, false, false, false, func_id);
		ret = hns3_cmd_set_promisc_mode(hw, &param);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
					" ret = %d", func_id, ret);
			return ret;
		}
	}

	return 0;
}
3896
3897 static void
3898 hns3_promisc_uninit(struct hns3_hw *hw)
3899 {
3900         struct hns3_promisc_param param;
3901         uint16_t func_id;
3902         int ret;
3903
3904         func_id = HNS3_PF_FUNC_ID;
3905
3906         /*
3907          * In current version VFs are not supported when PF is driven by
3908          * DPDK driver, and VFs' promisc mode status has been cleared during
3909          * init and their status will not change. So just clear PF's promisc
3910          * mode status during uninit.
3911          */
3912         hns3_promisc_param_init(&param, false, false, false, func_id);
3913         ret = hns3_cmd_set_promisc_mode(hw, &param);
3914         if (ret)
3915                 PMD_INIT_LOG(ERR, "failed to clear promisc status during"
3916                                 " uninit, ret = %d", ret);
3917 }
3918
/*
 * .promiscuous_enable ethdev callback: enable unicast+multicast
 * promiscuous mode, and when the vlan filter offload is configured,
 * disable the vlan filter so that all packets are received. If disabling
 * the vlan filter fails, the promiscuous setting is rolled back and the
 * vlan filter error code is returned.
 */
static int
hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	bool allmulti = dev->data->all_multicast ? true : false;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	int err;
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, true, true);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
			 ret);
		return ret;
	}

	/*
	 * When promiscuous mode was enabled, disable the vlan filter to let
	 * all packets coming in in the receiving direction.
	 */
	offloads = dev->data->dev_conf.rxmode.offloads;
	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
		ret = hns3_enable_vlan_filter(hns, false);
		if (ret) {
			hns3_err(hw, "failed to enable promiscuous mode due to "
				     "failure to disable vlan filter, ret = %d",
				 ret);
			/* Roll back to the pre-call promiscuous state. */
			err = hns3_set_promisc_mode(hw, false, allmulti);
			if (err)
				hns3_err(hw, "failed to restore promiscuous "
					 "status after disable vlan filter "
					 "failed during enabling promiscuous "
					 "mode, ret = %d", ret);
		}
	}

	rte_spinlock_unlock(&hw->lock);

	return ret;
}
3962
/*
 * .promiscuous_disable ethdev callback: fall back to allmulticast mode if
 * it is active, and re-enable the vlan filter when that offload is
 * configured. If restoring the vlan filter fails, promiscuous mode is
 * re-enabled and the vlan filter error code is returned.
 */
static int
hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	bool allmulti = dev->data->all_multicast ? true : false;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint64_t offloads;
	int err;
	int ret;

	/* If now in all_multicast mode, must remain in all_multicast mode. */
	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_promisc_mode(hw, false, allmulti);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
			 ret);
		return ret;
	}
	/* when promiscuous mode was disabled, restore the vlan filter status */
	offloads = dev->data->dev_conf.rxmode.offloads;
	if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
		ret = hns3_enable_vlan_filter(hns, true);
		if (ret) {
			hns3_err(hw, "failed to disable promiscuous mode due to"
				 " failure to restore vlan filter, ret = %d",
				 ret);
			/* Roll back to full promiscuous mode. */
			err = hns3_set_promisc_mode(hw, true, true);
			if (err)
				hns3_err(hw, "failed to restore promiscuous "
					 "status after enabling vlan filter "
					 "failed during disabling promiscuous "
					 "mode, ret = %d", ret);
		}
	}
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
4002
4003 static int
4004 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
4005 {
4006         struct hns3_adapter *hns = dev->data->dev_private;
4007         struct hns3_hw *hw = &hns->hw;
4008         int ret;
4009
4010         if (dev->data->promiscuous)
4011                 return 0;
4012
4013         rte_spinlock_lock(&hw->lock);
4014         ret = hns3_set_promisc_mode(hw, false, true);
4015         rte_spinlock_unlock(&hw->lock);
4016         if (ret)
4017                 hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
4018                          ret);
4019
4020         return ret;
4021 }
4022
4023 static int
4024 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
4025 {
4026         struct hns3_adapter *hns = dev->data->dev_private;
4027         struct hns3_hw *hw = &hns->hw;
4028         int ret;
4029
4030         /* If now in promiscuous mode, must remain in all_multicast mode. */
4031         if (dev->data->promiscuous)
4032                 return 0;
4033
4034         rte_spinlock_lock(&hw->lock);
4035         ret = hns3_set_promisc_mode(hw, false, false);
4036         rte_spinlock_unlock(&hw->lock);
4037         if (ret)
4038                 hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4039                          ret);
4040
4041         return ret;
4042 }
4043
4044 static int
4045 hns3_dev_promisc_restore(struct hns3_adapter *hns)
4046 {
4047         struct hns3_hw *hw = &hns->hw;
4048         bool allmulti = hw->data->all_multicast ? true : false;
4049         int ret;
4050
4051         if (hw->data->promiscuous) {
4052                 ret = hns3_set_promisc_mode(hw, true, true);
4053                 if (ret)
4054                         hns3_err(hw, "failed to restore promiscuous mode, "
4055                                  "ret = %d", ret);
4056                 return ret;
4057         }
4058
4059         ret = hns3_set_promisc_mode(hw, false, allmulti);
4060         if (ret)
4061                 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4062                          ret);
4063         return ret;
4064 }
4065
4066 static int
4067 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4068 {
4069         struct hns3_sfp_info_cmd *resp;
4070         struct hns3_cmd_desc desc;
4071         int ret;
4072
4073         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4074         resp = (struct hns3_sfp_info_cmd *)desc.data;
4075         resp->query_type = HNS3_ACTIVE_QUERY;
4076
4077         ret = hns3_cmd_send(hw, &desc, 1);
4078         if (ret == -EOPNOTSUPP) {
4079                 hns3_warn(hw, "firmware does not support get SFP info,"
4080                           " ret = %d.", ret);
4081                 return ret;
4082         } else if (ret) {
4083                 hns3_err(hw, "get sfp info failed, ret = %d.", ret);
4084                 return ret;
4085         }
4086
4087         /*
4088          * In some case, the speed of MAC obtained from firmware may be 0, it
4089          * shouldn't be set to mac->speed.
4090          */
4091         if (!rte_le_to_cpu_32(resp->sfp_speed))
4092                 return 0;
4093
4094         mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
4095         /*
4096          * if resp->supported_speed is 0, it means it's an old version
4097          * firmware, do not update these params.
4098          */
4099         if (resp->supported_speed) {
4100                 mac_info->query_type = HNS3_ACTIVE_QUERY;
4101                 mac_info->supported_speed =
4102                                         rte_le_to_cpu_32(resp->supported_speed);
4103                 mac_info->support_autoneg = resp->autoneg_ability;
4104                 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
4105                                         : RTE_ETH_LINK_AUTONEG;
4106         } else {
4107                 mac_info->query_type = HNS3_DEFAULT_QUERY;
4108         }
4109
4110         return 0;
4111 }
4112
4113 static uint8_t
4114 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4115 {
4116         if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
4117                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
4118
4119         return duplex;
4120 }
4121
4122 static int
4123 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4124 {
4125         struct hns3_mac *mac = &hw->mac;
4126         int ret;
4127
4128         duplex = hns3_check_speed_dup(duplex, speed);
4129         if (mac->link_speed == speed && mac->link_duplex == duplex)
4130                 return 0;
4131
4132         ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4133         if (ret)
4134                 return ret;
4135
4136         ret = hns3_port_shaper_update(hw, speed);
4137         if (ret)
4138                 return ret;
4139
4140         mac->link_speed = speed;
4141         mac->link_duplex = duplex;
4142
4143         return 0;
4144 }
4145
/*
 * Refresh the MAC link parameters of a fiber port from the SFP information
 * reported by firmware. If the firmware only reports the SFP speed (default
 * query), the MAC speed is reconfigured to match it; with an active query the
 * firmware already keeps MAC and SFP consistent and only the cached fields
 * are updated.
 */
static int
hns3_update_fiber_link_info(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_mac *mac = &hw->mac;
	struct hns3_mac mac_info;
	int ret;

	/* If firmware do not support get SFP/qSFP speed, return directly */
	if (!pf->support_sfp_query)
		return 0;

	memset(&mac_info, 0, sizeof(struct hns3_mac));
	ret = hns3_get_sfp_info(hw, &mac_info);
	if (ret == -EOPNOTSUPP) {
		/* Remember the lack of support to skip future queries. */
		pf->support_sfp_query = false;
		return ret;
	} else if (ret)
		return ret;

	/* Do nothing if no SFP */
	if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
		return 0;

	/*
	 * If query_type is HNS3_ACTIVE_QUERY, it is no need
	 * to reconfigure the speed of MAC. Otherwise, it indicates
	 * that the current firmware only supports to obtain the
	 * speed of the SFP, and the speed of MAC needs to reconfigure.
	 */
	mac->query_type = mac_info.query_type;
	if (mac->query_type == HNS3_ACTIVE_QUERY) {
		/* Update the shaper first so it never lags the speed. */
		if (mac_info.link_speed != mac->link_speed) {
			ret = hns3_port_shaper_update(hw, mac_info.link_speed);
			if (ret)
				return ret;
		}

		mac->link_speed = mac_info.link_speed;
		mac->supported_speed = mac_info.supported_speed;
		mac->support_autoneg = mac_info.support_autoneg;
		mac->link_autoneg = mac_info.link_autoneg;

		return 0;
	}

	/* Config full duplex for SFP */
	return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
				      RTE_ETH_LINK_FULL_DUPLEX);
}
4196
4197 static void
4198 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4199 {
4200 #define HNS3_PHY_SUPPORTED_SPEED_MASK   0x2f
4201
4202         struct hns3_phy_params_bd0_cmd *req;
4203         uint32_t supported;
4204
4205         req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4206         mac->link_speed = rte_le_to_cpu_32(req->speed);
4207         mac->link_duplex = hns3_get_bit(req->duplex,
4208                                            HNS3_PHY_DUPLEX_CFG_B);
4209         mac->link_autoneg = hns3_get_bit(req->autoneg,
4210                                            HNS3_PHY_AUTONEG_CFG_B);
4211         mac->advertising = rte_le_to_cpu_32(req->advertising);
4212         mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4213         supported = rte_le_to_cpu_32(req->supported);
4214         mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4215         mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4216 }
4217
4218 static int
4219 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4220 {
4221         struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4222         uint16_t i;
4223         int ret;
4224
4225         for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4226                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4227                                           true);
4228                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4229         }
4230         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4231
4232         ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4233         if (ret) {
4234                 hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4235                 return ret;
4236         }
4237
4238         hns3_parse_copper_phy_params(desc, mac);
4239
4240         return 0;
4241 }
4242
4243 static int
4244 hns3_update_copper_link_info(struct hns3_hw *hw)
4245 {
4246         struct hns3_mac *mac = &hw->mac;
4247         struct hns3_mac mac_info;
4248         int ret;
4249
4250         memset(&mac_info, 0, sizeof(struct hns3_mac));
4251         ret = hns3_get_copper_phy_params(hw, &mac_info);
4252         if (ret)
4253                 return ret;
4254
4255         if (mac_info.link_speed != mac->link_speed) {
4256                 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4257                 if (ret)
4258                         return ret;
4259         }
4260
4261         mac->link_speed = mac_info.link_speed;
4262         mac->link_duplex = mac_info.link_duplex;
4263         mac->link_autoneg = mac_info.link_autoneg;
4264         mac->supported_speed = mac_info.supported_speed;
4265         mac->advertising = mac_info.advertising;
4266         mac->lp_advertising = mac_info.lp_advertising;
4267         mac->support_autoneg = mac_info.support_autoneg;
4268
4269         return 0;
4270 }
4271
4272 static int
4273 hns3_update_link_info(struct rte_eth_dev *eth_dev)
4274 {
4275         struct hns3_adapter *hns = eth_dev->data->dev_private;
4276         struct hns3_hw *hw = &hns->hw;
4277         int ret = 0;
4278
4279         if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4280                 ret = hns3_update_copper_link_info(hw);
4281         else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
4282                 ret = hns3_update_fiber_link_info(hw);
4283
4284         return ret;
4285 }
4286
4287 static int
4288 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
4289 {
4290         struct hns3_config_mac_mode_cmd *req;
4291         struct hns3_cmd_desc desc;
4292         uint32_t loop_en = 0;
4293         uint8_t val = 0;
4294         int ret;
4295
4296         req = (struct hns3_config_mac_mode_cmd *)desc.data;
4297
4298         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
4299         if (enable)
4300                 val = 1;
4301         hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
4302         hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
4303         hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
4304         hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
4305         hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
4306         hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
4307         hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
4308         hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
4309         hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
4310         hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
4311
4312         /*
4313          * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
4314          * when receiving frames. Otherwise, CRC will be stripped.
4315          */
4316         if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
4317                 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
4318         else
4319                 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
4320         hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
4321         hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
4322         hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
4323         req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
4324
4325         ret = hns3_cmd_send(hw, &desc, 1);
4326         if (ret)
4327                 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
4328
4329         return ret;
4330 }
4331
4332 static int
4333 hns3_get_mac_link_status(struct hns3_hw *hw)
4334 {
4335         struct hns3_link_status_cmd *req;
4336         struct hns3_cmd_desc desc;
4337         int link_status;
4338         int ret;
4339
4340         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4341         ret = hns3_cmd_send(hw, &desc, 1);
4342         if (ret) {
4343                 hns3_err(hw, "get link status cmd failed %d", ret);
4344                 return RTE_ETH_LINK_DOWN;
4345         }
4346
4347         req = (struct hns3_link_status_cmd *)desc.data;
4348         link_status = req->status & HNS3_LINK_STATUS_UP_M;
4349
4350         return !!link_status;
4351 }
4352
4353 static bool
4354 hns3_update_link_status(struct hns3_hw *hw)
4355 {
4356         int state;
4357
4358         state = hns3_get_mac_link_status(hw);
4359         if (state != hw->mac.link_status) {
4360                 hw->mac.link_status = state;
4361                 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
4362                 return true;
4363         }
4364
4365         return false;
4366 }
4367
4368 void
4369 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query)
4370 {
4371         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4372         struct rte_eth_link new_link;
4373         int ret;
4374
4375         if (query)
4376                 hns3_update_port_link_info(dev);
4377
4378         memset(&new_link, 0, sizeof(new_link));
4379         hns3_setup_linkstatus(dev, &new_link);
4380
4381         ret = rte_eth_linkstatus_set(dev, &new_link);
4382         if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
4383                 hns3_start_report_lse(dev);
4384 }
4385
4386 static void
4387 hns3_service_handler(void *param)
4388 {
4389         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4390         struct hns3_adapter *hns = eth_dev->data->dev_private;
4391         struct hns3_hw *hw = &hns->hw;
4392
4393         if (!hns3_is_reset_pending(hns))
4394                 hns3_update_linkstatus_and_event(hw, true);
4395         else
4396                 hns3_warn(hw, "Cancel the query when reset is pending");
4397
4398         rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4399 }
4400
/*
 * Bring up the PF's hardware features in order: TQP mapping, unicast MAC
 * table (UMV) space, MAC, manager table, promiscuous mode, VLAN, DCB, flow
 * director, TSO, GRO and the queue/interrupt-vector mapping. On any failure
 * after the UMV space was allocated, that space is released before returning.
 */
static int
hns3_init_hardware(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_map_tqp(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
		return ret;
	}

	ret = hns3_init_umv_space(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
		return ret;
	}

	ret = hns3_mac_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_init_mgr_tbl(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_promisc_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
			     ret);
		goto err_mac_init;
	}

	ret = hns3_init_vlan_config(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_dcb_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_init_fd_config(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
		goto err_mac_init;
	}

	ret = hns3_config_gro(hw, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
		goto err_mac_init;
	}

	/*
	 * In the initialization clearing the all hardware mapping relationship
	 * configurations between queues and interrupt vectors is needed, so
	 * some error caused by the residual configurations, such as the
	 * unexpected interrupt, can be avoid.
	 */
	ret = hns3_init_ring_with_vector(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
		goto err_mac_init;
	}

	return 0;

	/* Single rollback point: only the UMV space needs to be released. */
err_mac_init:
	hns3_uninit_umv_space(hw);
	return ret;
}
4486
4487 static int
4488 hns3_clear_hw(struct hns3_hw *hw)
4489 {
4490         struct hns3_cmd_desc desc;
4491         int ret;
4492
4493         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4494
4495         ret = hns3_cmd_send(hw, &desc, 1);
4496         if (ret && ret != -EOPNOTSUPP)
4497                 return ret;
4498
4499         return 0;
4500 }
4501
4502 static void
4503 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
4504 {
4505         uint32_t val;
4506
4507         /*
4508          * The new firmware support report more hardware error types by
4509          * msix mode. These errors are defined as RAS errors in hardware
4510          * and belong to a different type from the MSI-x errors processed
4511          * by the network driver.
4512          *
4513          * Network driver should open the new error report on initialization.
4514          */
4515         val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4516         hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4517         hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
4518 }
4519
/*
 * Derive a supported-speed bitmap from the currently effective link speed,
 * used when firmware cannot report the real capability mask of a fiber port.
 * NOTE(review): "firber" in the name is a typo for "fiber"; kept unchanged
 * to avoid touching callers.
 */
static uint32_t
hns3_set_firber_default_support_speed(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;

	switch (mac->link_speed) {
	case RTE_ETH_SPEED_NUM_1G:
		return HNS3_FIBER_LINK_SPEED_1G_BIT;
	case RTE_ETH_SPEED_NUM_10G:
		return HNS3_FIBER_LINK_SPEED_10G_BIT;
	case RTE_ETH_SPEED_NUM_25G:
		return HNS3_FIBER_LINK_SPEED_25G_BIT;
	case RTE_ETH_SPEED_NUM_40G:
		return HNS3_FIBER_LINK_SPEED_40G_BIT;
	case RTE_ETH_SPEED_NUM_50G:
		return HNS3_FIBER_LINK_SPEED_50G_BIT;
	case RTE_ETH_SPEED_NUM_100G:
		return HNS3_FIBER_LINK_SPEED_100G_BIT;
	case RTE_ETH_SPEED_NUM_200G:
		return HNS3_FIBER_LINK_SPEED_200G_BIT;
	default:
		/* Unknown speed yields an empty capability mask. */
		hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
		return 0;
	}
}
4545
4546 /*
4547  * Validity of supported_speed for fiber and copper media type can be
4548  * guaranteed by the following policy:
4549  * Copper:
4550  *       Although the initialization of the phy in the firmware may not be
4551  *       completed, the firmware can guarantees that the supported_speed is
4552  *       an valid value.
4553  * Firber:
4554  *       If the version of firmware supports the active query way of the
4555  *       HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
4556  *       through it. If unsupported, use the SFP's speed as the value of the
4557  *       supported_speed.
4558  */
4559 static int
4560 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
4561 {
4562         struct hns3_adapter *hns = eth_dev->data->dev_private;
4563         struct hns3_hw *hw = &hns->hw;
4564         struct hns3_mac *mac = &hw->mac;
4565         int ret;
4566
4567         ret = hns3_update_link_info(eth_dev);
4568         if (ret)
4569                 return ret;
4570
4571         if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
4572                 /*
4573                  * Some firmware does not support the report of supported_speed,
4574                  * and only report the effective speed of SFP. In this case, it
4575                  * is necessary to use the SFP's speed as the supported_speed.
4576                  */
4577                 if (mac->supported_speed == 0)
4578                         mac->supported_speed =
4579                                 hns3_set_firber_default_support_speed(hw);
4580         }
4581
4582         return 0;
4583 }
4584
4585 static void
4586 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
4587 {
4588         struct hns3_mac *mac = &hns->hw.mac;
4589
4590         if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
4591                 hns->pf.support_fc_autoneg = true;
4592                 return;
4593         }
4594
4595         /*
4596          * Flow control auto-negotiation requires the cooperation of the driver
4597          * and firmware. Currently, the optical port does not support flow
4598          * control auto-negotiation.
4599          */
4600         hns->pf.support_fc_autoneg = false;
4601 }
4602
/*
 * Full PF initialization: map BAR2, bring up the firmware command channel,
 * clear residual hardware state, register/enable interrupts, read the device
 * configuration and initialize statistics, hardware features, flow director,
 * RSS, hardware error interrupts, supported speeds and TM. Errors unwind in
 * strict reverse order through the goto-cleanup chain below.
 */
static int
hns3_init_pf(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Get hardware io base address from pcie BAR2 IO space */
	hw->io_base = pci_dev->mem_resource[2].addr;

	/* Firmware command queue initialize */
	ret = hns3_cmd_init_queue(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
		goto err_cmd_init_queue;
	}

	/* Drop any stale vector0 event state before enabling interrupts. */
	hns3_clear_all_event_cause(hw);

	/* Firmware command initialize */
	ret = hns3_cmd_init(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
		goto err_cmd_init;
	}

	hns3_tx_push_init(eth_dev);

	/*
	 * To ensure that the hardware environment is clean during
	 * initialization, the driver actively clear the hardware environment
	 * during initialization, including PF and corresponding VFs' vlan, mac,
	 * flow table configurations, etc.
	 */
	ret = hns3_clear_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
		goto err_cmd_init;
	}

	/* Hardware statistics of imissed registers cleared. */
	ret = hns3_update_imissed_stats(hw, true);
	if (ret) {
		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
		goto err_cmd_init;
	}

	hns3_config_all_msix_error(hw, true);

	ret = rte_intr_callback_register(pci_dev->intr_handle,
					 hns3_interrupt_handler,
					 eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
		goto err_intr_callback_register;
	}

	ret = hns3_ptp_init(hw);
	if (ret)
		goto err_get_config;

	/* Enable interrupt */
	rte_intr_enable(pci_dev->intr_handle);
	hns3_pf_enable_irq0(hw);

	/* Get configuration */
	ret = hns3_get_configuration(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
		goto err_get_config;
	}

	ret = hns3_tqp_stats_init(hw);
	if (ret)
		goto err_get_config;

	ret = hns3_init_hardware(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
		goto err_init_hw;
	}

	/* Initialize flow director filter list & hash */
	ret = hns3_fdir_filter_init(hns);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
		goto err_fdir;
	}

	hns3_rss_set_default_args(hw);

	ret = hns3_enable_hw_error_intr(hns, true);
	if (ret) {
		PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
			     ret);
		goto err_enable_intr;
	}

	ret = hns3_get_port_supported_speed(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
			     "by device, ret = %d.", ret);
		goto err_supported_speed;
	}

	hns3_get_fc_autoneg_capability(hns);

	hns3_tm_conf_init(eth_dev);

	return 0;

	/* Cleanup chain: each label undoes the steps completed before it. */
err_supported_speed:
	(void)hns3_enable_hw_error_intr(hns, false);
err_enable_intr:
	hns3_fdir_filter_uninit(hns);
err_fdir:
	hns3_uninit_umv_space(hw);
err_init_hw:
	hns3_tqp_stats_uninit(hw);
err_get_config:
	hns3_pf_disable_irq0(hw);
	rte_intr_disable(pci_dev->intr_handle);
	hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
			     eth_dev);
err_intr_callback_register:
err_cmd_init:
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
err_cmd_init_queue:
	hw->io_base = NULL;

	return ret;
}
4740
/*
 * Tear down everything hns3_init_pf() set up, in reverse order of
 * initialization: TM, hardware error interrupts, RSS, GRO, promiscuous
 * tables, flow rules, flow director, UMV space, TQP statistics, MAC tunnel
 * interrupts, IRQ0/interrupt callback, extended MSI-X error reporting and
 * finally the firmware command channel.
 */
static void
hns3_uninit_pf(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct rte_device *dev = eth_dev->device;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hns3_tm_conf_uninit(eth_dev);
	hns3_enable_hw_error_intr(hns, false);
	hns3_rss_uninit(hns);
	(void)hns3_config_gro(hw, false);
	hns3_promisc_uninit(hw);
	hns3_flow_uninit(eth_dev);
	hns3_fdir_filter_uninit(hns);
	hns3_uninit_umv_space(hw);
	hns3_tqp_stats_uninit(hw);
	hns3_config_mac_tnl_int(hw, false);
	hns3_pf_disable_irq0(hw);
	rte_intr_disable(pci_dev->intr_handle);
	hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
			     eth_dev);
	hns3_config_all_msix_error(hw, false);
	hns3_cmd_uninit(hw);
	hns3_cmd_destroy_queue(hw);
	hw->io_base = NULL;
}
4770
4771 static uint32_t
4772 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
4773 {
4774         uint32_t speed_bit;
4775
4776         switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4777         case RTE_ETH_LINK_SPEED_10M:
4778                 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
4779                 break;
4780         case RTE_ETH_LINK_SPEED_10M_HD:
4781                 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
4782                 break;
4783         case RTE_ETH_LINK_SPEED_100M:
4784                 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
4785                 break;
4786         case RTE_ETH_LINK_SPEED_100M_HD:
4787                 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
4788                 break;
4789         case RTE_ETH_LINK_SPEED_1G:
4790                 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
4791                 break;
4792         default:
4793                 speed_bit = 0;
4794                 break;
4795         }
4796
4797         return speed_bit;
4798 }
4799
4800 static uint32_t
4801 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
4802 {
4803         uint32_t speed_bit;
4804
4805         switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4806         case RTE_ETH_LINK_SPEED_1G:
4807                 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
4808                 break;
4809         case RTE_ETH_LINK_SPEED_10G:
4810                 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
4811                 break;
4812         case RTE_ETH_LINK_SPEED_25G:
4813                 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
4814                 break;
4815         case RTE_ETH_LINK_SPEED_40G:
4816                 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
4817                 break;
4818         case RTE_ETH_LINK_SPEED_50G:
4819                 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
4820                 break;
4821         case RTE_ETH_LINK_SPEED_100G:
4822                 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
4823                 break;
4824         case RTE_ETH_LINK_SPEED_200G:
4825                 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
4826                 break;
4827         default:
4828                 speed_bit = 0;
4829                 break;
4830         }
4831
4832         return speed_bit;
4833 }
4834
4835 static int
4836 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
4837 {
4838         struct hns3_mac *mac = &hw->mac;
4839         uint32_t supported_speed = mac->supported_speed;
4840         uint32_t speed_bit = 0;
4841
4842         if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
4843                 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
4844         else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
4845                 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
4846
4847         if (!(speed_bit & supported_speed)) {
4848                 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.",
4849                          link_speeds);
4850                 return -EINVAL;
4851         }
4852
4853         return 0;
4854 }
4855
4856 static inline uint32_t
4857 hns3_get_link_speed(uint32_t link_speeds)
4858 {
4859         uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
4860
4861         if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
4862             link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
4863                 speed = RTE_ETH_SPEED_NUM_10M;
4864         if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
4865             link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
4866                 speed = RTE_ETH_SPEED_NUM_100M;
4867         if (link_speeds & RTE_ETH_LINK_SPEED_1G)
4868                 speed = RTE_ETH_SPEED_NUM_1G;
4869         if (link_speeds & RTE_ETH_LINK_SPEED_10G)
4870                 speed = RTE_ETH_SPEED_NUM_10G;
4871         if (link_speeds & RTE_ETH_LINK_SPEED_25G)
4872                 speed = RTE_ETH_SPEED_NUM_25G;
4873         if (link_speeds & RTE_ETH_LINK_SPEED_40G)
4874                 speed = RTE_ETH_SPEED_NUM_40G;
4875         if (link_speeds & RTE_ETH_LINK_SPEED_50G)
4876                 speed = RTE_ETH_SPEED_NUM_50G;
4877         if (link_speeds & RTE_ETH_LINK_SPEED_100G)
4878                 speed = RTE_ETH_SPEED_NUM_100G;
4879         if (link_speeds & RTE_ETH_LINK_SPEED_200G)
4880                 speed = RTE_ETH_SPEED_NUM_200G;
4881
4882         return speed;
4883 }
4884
4885 static uint8_t
4886 hns3_get_link_duplex(uint32_t link_speeds)
4887 {
4888         if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
4889             (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
4890                 return RTE_ETH_LINK_HALF_DUPLEX;
4891         else
4892                 return RTE_ETH_LINK_FULL_DUPLEX;
4893 }
4894
/*
 * Configure autoneg/speed/duplex for a copper (PHY) port by sending the
 * multi-descriptor PHY parameter command to firmware.
 * Returns 0 on success, negative errno from the command channel otherwise.
 */
static int
hns3_set_copper_port_link_speed(struct hns3_hw *hw,
				struct hns3_set_link_speed_cfg *cfg)
{
	struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
	struct hns3_phy_params_bd0_cmd *req;
	uint16_t i;

	/* Chain all descriptors but the last one with the NEXT flag. */
	for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
					  false);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
	/* Only BD0 carries the autoneg/advertising/speed/duplex fields. */
	req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
	req->autoneg = cfg->autoneg;

	/*
	 * The full speed capability is used to negotiate when
	 * auto-negotiation is enabled.
	 */
	if (cfg->autoneg) {
		req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
				    HNS3_PHY_LINK_SPEED_10M_HD_BIT |
				    HNS3_PHY_LINK_SPEED_100M_BIT |
				    HNS3_PHY_LINK_SPEED_100M_HD_BIT |
				    HNS3_PHY_LINK_SPEED_1000M_BIT;
	} else {
		/* Fixed-speed mode: program the requested speed/duplex. */
		req->speed = cfg->speed;
		req->duplex = cfg->duplex;
	}

	return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
}
4929
4930 static int
4931 hns3_set_autoneg(struct hns3_hw *hw, bool enable)
4932 {
4933         struct hns3_config_auto_neg_cmd *req;
4934         struct hns3_cmd_desc desc;
4935         uint32_t flag = 0;
4936         int ret;
4937
4938         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);
4939
4940         req = (struct hns3_config_auto_neg_cmd *)desc.data;
4941         if (enable)
4942                 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
4943         req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);
4944
4945         ret = hns3_cmd_send(hw, &desc, 1);
4946         if (ret)
4947                 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);
4948
4949         return ret;
4950 }
4951
/*
 * Configure link speed for a fiber port. When the MAC supports
 * auto-negotiation, the autoneg switch is programmed first; a fixed
 * speed/duplex is written only when auto-negotiation is disabled.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
			       struct hns3_set_link_speed_cfg *cfg)
{
	int ret;

	if (hw->mac.support_autoneg) {
		ret = hns3_set_autoneg(hw, cfg->autoneg);
		if (ret) {
			hns3_err(hw, "failed to configure auto-negotiation.");
			return ret;
		}

		/*
		 * To enable auto-negotiation, we only need to open the switch
		 * of auto-negotiation, then firmware sets all speed
		 * capabilities.
		 */
		if (cfg->autoneg)
			return 0;
	}

	/*
	 * Some hardware doesn't support auto-negotiation, but users may not
	 * configure link_speeds (default 0), which means auto-negotiation.
	 * In this case, a warning message need to be printed, instead of
	 * an error.
	 */
	if (cfg->autoneg) {
		hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
		return 0;
	}

	return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
}
4987
4988 static int
4989 hns3_set_port_link_speed(struct hns3_hw *hw,
4990                          struct hns3_set_link_speed_cfg *cfg)
4991 {
4992         int ret;
4993
4994         if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
4995 #if defined(RTE_HNS3_ONLY_1630_FPGA)
4996                 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4997                 if (pf->is_tmp_phy)
4998                         return 0;
4999 #endif
5000
5001                 ret = hns3_set_copper_port_link_speed(hw, cfg);
5002                 if (ret) {
5003                         hns3_err(hw, "failed to set copper port link speed,"
5004                                  "ret = %d.", ret);
5005                         return ret;
5006                 }
5007         } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
5008                 ret = hns3_set_fiber_port_link_speed(hw, cfg);
5009                 if (ret) {
5010                         hns3_err(hw, "failed to set fiber port link speed,"
5011                                  "ret = %d.", ret);
5012                         return ret;
5013                 }
5014         }
5015
5016         return 0;
5017 }
5018
5019 static int
5020 hns3_apply_link_speed(struct hns3_hw *hw)
5021 {
5022         struct rte_eth_conf *conf = &hw->data->dev_conf;
5023         struct hns3_set_link_speed_cfg cfg;
5024
5025         memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
5026         cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
5027                         RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
5028         if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
5029                 cfg.speed = hns3_get_link_speed(conf->link_speeds);
5030                 cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
5031         }
5032
5033         return hns3_set_port_link_speed(hw, &cfg);
5034 }
5035
/*
 * Bring the datapath up: sync queue mapping and TM configuration,
 * initialize hardware queues, enable the MAC and apply the link speed.
 * @reset_queue: whether the TQPs must be reset during queue init.
 * On failure, the MAC is disabled, mbufs released and all TQPs reset.
 * Returns 0 on success, negative errno otherwise.
 */
static int
hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	bool link_en;
	int ret;

	ret = hns3_update_queue_map_configure(hns);
	if (ret) {
		hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
			 ret);
		return ret;
	}

	/* Note: hns3_tm_conf_update must be called after configuring DCB. */
	ret = hns3_tm_conf_update(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
		return ret;
	}

	hns3_enable_rxd_adv_layout(hw);

	ret = hns3_init_queues(hns, reset_queue);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
		return ret;
	}

	/* Honor a prior dev_set_link_down(): keep the MAC disabled then. */
	link_en = hw->set_link_down ? false : true;
	ret = hns3_cfg_mac_mode(hw, link_en);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
		goto err_config_mac_mode;
	}

	ret = hns3_apply_link_speed(hw);
	if (ret)
		goto err_set_link_speed;

	return 0;

err_set_link_speed:
	(void)hns3_cfg_mac_mode(hw, false);

err_config_mac_mode:
	hns3_dev_release_mbufs(hns);
	/*
	 * Here is exception handling, hns3_reset_all_tqps will have the
	 * corresponding error message if it is handled incorrectly, so it is
	 * not necessary to check hns3_reset_all_tqps return value, here keep
	 * ret as the error code causing the exception.
	 */
	(void)hns3_reset_all_tqps(hns);
	return ret;
}
5092
/* Re-install software-maintained filters (currently only RSS flow rules). */
static void
hns3_restore_filter(struct rte_eth_dev *dev)
{
	hns3_restore_rss_filter(dev);
}
5098
/*
 * .dev_start ops: bring the port fully up.
 * Starts the datapath, maps Rx interrupts, enables all Tx/Rx queues and
 * schedules the periodic service alarm. Fails with -EBUSY while a reset
 * is in progress; on any later failure all steps are rolled back.
 */
static int
hns3_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	/* Saved so a failed start can restore the user's link-down request. */
	bool old_state = hw->set_link_down;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;

	/*
	 * If the dev_set_link_down() API has been called, the "set_link_down"
	 * flag can be cleared by dev_start() API. In addition, the flag should
	 * also be cleared before calling hns3_do_start() so that MAC can be
	 * enabled in dev_start stage.
	 */
	hw->set_link_down = false;
	ret = hns3_do_start(hns, true);
	if (ret)
		goto do_start_fail;

	ret = hns3_map_rx_interrupt(dev);
	if (ret)
		goto map_rx_inter_err;

	/*
	 * There are three register used to control the status of a TQP
	 * (contains a pair of Tx queue and Rx queue) in the new version network
	 * engine. One is used to control the enabling of Tx queue, the other is
	 * used to control the enabling of Rx queue, and the last is the master
	 * switch used to control the enabling of the tqp. The Tx register and
	 * TQP register must be enabled at the same time to enable a Tx queue.
	 * The same applies to the Rx queue. For the older network engine, this
	 * function only refresh the enabled flag, and it is used to update the
	 * status of queue in the dpdk framework.
	 */
	ret = hns3_start_all_txqs(dev);
	if (ret)
		goto map_rx_inter_err;

	ret = hns3_start_all_rxqs(dev);
	if (ret)
		goto start_all_rxqs_fail;

	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	/* Datapath setup is done outside the lock, then other processes run. */
	hns3_rx_scattered_calc(dev);
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);

	hns3_restore_filter(dev);

	/* Enable interrupt of all rx queues before enabling queues */
	hns3_dev_all_rx_queue_intr_enable(hw, true);

	/*
	 * After finished the initialization, enable tqps to receive/transmit
	 * packets and refresh all queue status.
	 */
	hns3_start_tqps(hw);

	hns3_tm_dev_start_proc(hw);

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		hns3_dev_link_update(dev, 0);
	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);

	hns3_info(hw, "hns3 dev start successful!");

	return 0;

start_all_rxqs_fail:
	hns3_stop_all_txqs(dev);
map_rx_inter_err:
	(void)hns3_do_stop(hns);
do_start_fail:
	hw->set_link_down = old_state;
	hw->adapter_state = HNS3_NIC_CONFIGURED;
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
5187
/*
 * Bring the datapath down: release mbufs (unless resetting), disable the
 * MAC and, when the command channel is still usable, remove MAC addresses
 * and reset all TQPs. Returns 0 on success, negative errno otherwise.
 */
static int
hns3_do_stop(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/*
	 * The "hns3_do_stop" function will also be called by .stop_service to
	 * prepare reset. At the time of global or IMP reset, the command cannot
	 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
	 * accessed during the reset process. So the mbuf can not be released
	 * during reset and is required to be released after the reset is
	 * completed.
	 */
	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
		hns3_dev_release_mbufs(hns);

	ret = hns3_cfg_mac_mode(hw, false);
	if (ret)
		return ret;
	hw->mac.link_status = RTE_ETH_LINK_DOWN;

	/* Skip firmware commands when the command queue has been disabled. */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
		hns3_configure_all_mac_addr(hns, true);
		ret = hns3_reset_all_tqps(hns);
		if (ret) {
			hns3_err(hw, "failed to reset all queues ret = %d.",
				 ret);
			return ret;
		}
	}

	return 0;
}
5222
/*
 * .dev_stop ops: quiesce the datapath on all processes, then tear down
 * TM, TQPs, MAC and Rx interrupt mapping under the hw lock.
 * Always returns 0.
 */
static int
hns3_dev_stop(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	hw->adapter_state = HNS3_NIC_STOPPING;
	hns3_set_rxtx_function(dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(hw->cfg_max_queues);

	rte_spinlock_lock(&hw->lock);
	/*
	 * NOTE(review): teardown is skipped while a reset is in progress —
	 * presumably the reset service performs it instead; confirm against
	 * the reset handling code.
	 */
	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
		hns3_tm_dev_stop_proc(hw);
		hns3_config_mac_tnl_int(hw, false);
		hns3_stop_tqps(hw);
		hns3_do_stop(hns);
		hns3_unmap_rx_interrupt(dev);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	hns3_rx_scattered_reset(dev);
	rte_eal_alarm_cancel(hns3_service_handler, dev);
	hns3_stop_report_lse(dev);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
5256
/*
 * .dev_close ops: stop the port if started, abort any pending reset and
 * free all PF resources. On a secondary process only the multi-process
 * channel is torn down. Returns the result of hns3_dev_stop() (0 if the
 * port was not started).
 */
static int
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_mp_uninit(eth_dev);
		return 0;
	}

	if (hw->adapter_state == HNS3_NIC_STARTED)
		ret = hns3_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;

	/* Remove MC MAC addresses, VLAN tables and port-based VLAN config. */
	hns3_configure_all_mc_mac_addr(hns, true);
	hns3_remove_all_vlan_table(hns);
	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
	hns3_uninit_pf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	hns3_mp_uninit(eth_dev);
	hns3_warn(hw, "Close port %u finished", hw->data->port_id);

	return ret;
}
5287
5288 static void
5289 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
5290                                    bool *tx_pause)
5291 {
5292         struct hns3_mac *mac = &hw->mac;
5293         uint32_t advertising = mac->advertising;
5294         uint32_t lp_advertising = mac->lp_advertising;
5295         *rx_pause = false;
5296         *tx_pause = false;
5297
5298         if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
5299                 *rx_pause = true;
5300                 *tx_pause = true;
5301         } else if (advertising & lp_advertising &
5302                    HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
5303                 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5304                         *rx_pause = true;
5305                 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5306                         *tx_pause = true;
5307         }
5308 }
5309
5310 static enum hns3_fc_mode
5311 hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
5312 {
5313         enum hns3_fc_mode current_mode;
5314         bool rx_pause = false;
5315         bool tx_pause = false;
5316
5317         switch (hw->mac.media_type) {
5318         case HNS3_MEDIA_TYPE_COPPER:
5319                 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
5320                 break;
5321
5322         /*
5323          * Flow control auto-negotiation is not supported for fiber and
5324          * backplane media type.
5325          */
5326         case HNS3_MEDIA_TYPE_FIBER:
5327         case HNS3_MEDIA_TYPE_BACKPLANE:
5328                 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
5329                 current_mode = hw->requested_fc_mode;
5330                 goto out;
5331         default:
5332                 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
5333                          hw->mac.media_type);
5334                 current_mode = HNS3_FC_NONE;
5335                 goto out;
5336         }
5337
5338         if (rx_pause && tx_pause)
5339                 current_mode = HNS3_FC_FULL;
5340         else if (rx_pause)
5341                 current_mode = HNS3_FC_RX_PAUSE;
5342         else if (tx_pause)
5343                 current_mode = HNS3_FC_TX_PAUSE;
5344         else
5345                 current_mode = HNS3_FC_NONE;
5346
5347 out:
5348         return current_mode;
5349 }
5350
5351 static enum hns3_fc_mode
5352 hns3_get_current_fc_mode(struct rte_eth_dev *dev)
5353 {
5354         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5355         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5356         struct hns3_mac *mac = &hw->mac;
5357
5358         /*
5359          * When the flow control mode is obtained, the device may not complete
5360          * auto-negotiation. It is necessary to wait for link establishment.
5361          */
5362         (void)hns3_dev_link_update(dev, 1);
5363
5364         /*
5365          * If the link auto-negotiation of the nic is disabled, or the flow
5366          * control auto-negotiation is not supported, the forced flow control
5367          * mode is used.
5368          */
5369         if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
5370                 return hw->requested_fc_mode;
5371
5372         return hns3_get_autoneg_fc_mode(hw);
5373 }
5374
5375 static int
5376 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5377 {
5378         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5379         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5380         enum hns3_fc_mode current_mode;
5381
5382         current_mode = hns3_get_current_fc_mode(dev);
5383         switch (current_mode) {
5384         case HNS3_FC_FULL:
5385                 fc_conf->mode = RTE_ETH_FC_FULL;
5386                 break;
5387         case HNS3_FC_TX_PAUSE:
5388                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
5389                 break;
5390         case HNS3_FC_RX_PAUSE:
5391                 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
5392                 break;
5393         case HNS3_FC_NONE:
5394         default:
5395                 fc_conf->mode = RTE_ETH_FC_NONE;
5396                 break;
5397         }
5398
5399         fc_conf->pause_time = pf->pause_time;
5400         fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
5401
5402         return 0;
5403 }
5404
5405 static int
5406 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
5407 {
5408         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5409
5410         if (!pf->support_fc_autoneg) {
5411                 if (autoneg != 0) {
5412                         hns3_err(hw, "unsupported fc auto-negotiation setting.");
5413                         return -EOPNOTSUPP;
5414                 }
5415
5416                 /*
5417                  * Flow control auto-negotiation of the NIC is not supported,
5418                  * but other auto-negotiation features may be supported.
5419                  */
5420                 if (autoneg != hw->mac.link_autoneg) {
5421                         hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
5422                         return -EOPNOTSUPP;
5423                 }
5424
5425                 return 0;
5426         }
5427
5428         /*
5429          * If flow control auto-negotiation of the NIC is supported, all
5430          * auto-negotiation features are supported.
5431          */
5432         if (autoneg != hw->mac.link_autoneg) {
5433                 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
5434                 return -EOPNOTSUPP;
5435         }
5436
5437         return 0;
5438 }
5439
/*
 * .flow_ctrl_set ops: configure MAC pause flow control.
 * Rejects unsupported fields, invalid FC autoneg settings, a zero pause
 * time, an active PFC configuration and multi-TC setups without multi-TC
 * pause support, then applies the setting under the hw lock.
 */
static int
hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (fc_conf->high_water || fc_conf->low_water ||
	    fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
		hns3_err(hw, "Unsupported flow control settings specified, "
			 "high_water(%u), low_water(%u), send_xon(%u) and "
			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
			 fc_conf->high_water, fc_conf->low_water,
			 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
		return -EINVAL;
	}

	ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
	if (ret)
		return ret;

	if (!fc_conf->pause_time) {
		hns3_err(hw, "Invalid pause time %u setting.",
			 fc_conf->pause_time);
		return -EINVAL;
	}

	/* MAC pause and PFC are mutually exclusive. */
	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
	    hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
		hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
			 "current_fc_status = %d", hw->current_fc_status);
		return -EOPNOTSUPP;
	}

	if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
		hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
		return -EOPNOTSUPP;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_fc_enable(dev, fc_conf);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}
5485
5486 static int
5487 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
5488                             struct rte_eth_pfc_conf *pfc_conf)
5489 {
5490         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5491         int ret;
5492
5493         if (!hns3_dev_get_support(hw, DCB)) {
5494                 hns3_err(hw, "This port does not support dcb configurations.");
5495                 return -EOPNOTSUPP;
5496         }
5497
5498         if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
5499             pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
5500                 hns3_err(hw, "Unsupported flow control settings specified, "
5501                          "high_water(%u), low_water(%u), send_xon(%u) and "
5502                          "mac_ctrl_frame_fwd(%u) must be set to '0'",
5503                          pfc_conf->fc.high_water, pfc_conf->fc.low_water,
5504                          pfc_conf->fc.send_xon,
5505                          pfc_conf->fc.mac_ctrl_frame_fwd);
5506                 return -EINVAL;
5507         }
5508         if (pfc_conf->fc.autoneg) {
5509                 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
5510                 return -EINVAL;
5511         }
5512         if (pfc_conf->fc.pause_time == 0) {
5513                 hns3_err(hw, "Invalid pause time %u setting.",
5514                          pfc_conf->fc.pause_time);
5515                 return -EINVAL;
5516         }
5517
5518         if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5519             hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
5520                 hns3_err(hw, "MAC pause is enabled. Cannot set PFC."
5521                              "current_fc_status = %d", hw->current_fc_status);
5522                 return -EOPNOTSUPP;
5523         }
5524
5525         rte_spinlock_lock(&hw->lock);
5526         ret = hns3_dcb_pfc_enable(dev, pfc_conf);
5527         rte_spinlock_unlock(&hw->lock);
5528
5529         return ret;
5530 }
5531
/*
 * .get_dcb_info ops: report TC count, priority-to-TC mapping, TC
 * bandwidth weights and per-TC queue ranges. Always returns 0.
 */
static int
hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i;

	rte_spinlock_lock(&hw->lock);
	/* Multiple TCs are only reported when the DCB Rx mq mode is set. */
	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = pf->local_max_tc;
	else
		dcb_info->nb_tcs = 1;

	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
	/* TC bandwidth weights come from priority group 0's DWRR settings. */
	for (i = 0; i < dcb_info->nb_tcs; i++)
		dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];

	for (i = 0; i < hw->num_tc; i++) {
		dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
		dcb_info->tc_queue.tc_txq[0][i].base =
						hw->tc_queue[i].tqp_offset;
		dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
		dcb_info->tc_queue.tc_txq[0][i].nb_queue =
						hw->tc_queue[i].tqp_count;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
5563
5564 static int
5565 hns3_reinit_dev(struct hns3_adapter *hns)
5566 {
5567         struct hns3_hw *hw = &hns->hw;
5568         int ret;
5569
5570         ret = hns3_cmd_init(hw);
5571         if (ret) {
5572                 hns3_err(hw, "Failed to init cmd: %d", ret);
5573                 return ret;
5574         }
5575
5576         ret = hns3_reset_all_tqps(hns);
5577         if (ret) {
5578                 hns3_err(hw, "Failed to reset all queues: %d", ret);
5579                 return ret;
5580         }
5581
5582         ret = hns3_init_hardware(hns);
5583         if (ret) {
5584                 hns3_err(hw, "Failed to init hardware: %d", ret);
5585                 return ret;
5586         }
5587
5588         ret = hns3_enable_hw_error_intr(hns, true);
5589         if (ret) {
5590                 hns3_err(hw, "fail to enable hw error interrupts: %d",
5591                              ret);
5592                 return ret;
5593         }
5594         hns3_info(hw, "Reset done, driver initialization finished.");
5595
5596         return 0;
5597 }
5598
5599 static bool
5600 is_pf_reset_done(struct hns3_hw *hw)
5601 {
5602         uint32_t val, reg, reg_bit;
5603
5604         switch (hw->reset.level) {
5605         case HNS3_IMP_RESET:
5606                 reg = HNS3_GLOBAL_RESET_REG;
5607                 reg_bit = HNS3_IMP_RESET_BIT;
5608                 break;
5609         case HNS3_GLOBAL_RESET:
5610                 reg = HNS3_GLOBAL_RESET_REG;
5611                 reg_bit = HNS3_GLOBAL_RESET_BIT;
5612                 break;
5613         case HNS3_FUNC_RESET:
5614                 reg = HNS3_FUN_RST_ING;
5615                 reg_bit = HNS3_FUN_RST_ING_B;
5616                 break;
5617         case HNS3_FLR_RESET:
5618         default:
5619                 hns3_err(hw, "Wait for unsupported reset level: %d",
5620                          hw->reset.level);
5621                 return true;
5622         }
5623         val = hns3_read_dev(hw, reg);
5624         if (hns3_get_bit(val, reg_bit))
5625                 return false;
5626         else
5627                 return true;
5628 }
5629
/*
 * Check whether a reset of higher level than the one currently being
 * handled is pending or requested. Refreshes the event cause first so
 * newly raised hardware events are taken into account.
 */
bool
hns3_is_reset_pending(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset;

	hns3_check_event_cause(hns, NULL);
	/* A pending reset outranks the in-progress one: report it. */
	reset = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
	    hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is pending", reset);
		return true;
	}
	/* Same check for resets merely requested (not yet pending). */
	reset = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
	    hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is request", reset);
		return true;
	}
	return false;
}
5651
/*
 * Non-blocking step of the reset state machine: poll the hardware-ready
 * wait state. Returns 0 when the hardware reported ready, -ETIME on
 * timeout, and -EAGAIN while the alarm-driven wait is (still) running.
 * The first call arms the periodic hns3_wait_callback alarm.
 */
static int
hns3_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS)
		return 0;
	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		hns3_clock_gettime(&tv);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		/* A previous call already armed the wait; keep waiting. */
		return -EAGAIN;

	/* First entry: set up the wait and schedule the polling alarm. */
	wait_data->hns = hns;
	wait_data->check_completion = is_pf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
				HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
	wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}
5679
/*
 * Ask firmware to trigger a function-level reset for the function
 * identified by @func_id (written to the fun_reset_vfid field).
 */
static int
hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	return hns3_cmd_send(hw, &desc, 1);
}
5692
/*
 * Request an IMP (management processor) reset from firmware.
 * NOTE(review): 0xFFFE appears to be a raw, unnamed opcode and 0xeedd a
 * magic trigger value understood by firmware — confirm against the
 * firmware command specification.
 */
static int
hns3_imp_reset_cmd(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
	desc.data[0] = 0xeedd;

	return hns3_cmd_send(hw, &desc, 1);
}
5703
/*
 * Trigger the requested reset level in response to an MSI-X error event.
 * Skipped entirely while a global or function reset is already in
 * progress. On success the level is cleared from hw->reset.request.
 */
static void
hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	uint32_t val;

	hns3_clock_gettime(&tv);
	/* Don't kick off another reset while one is visibly in progress. */
	if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
	    hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
		hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return;
	}

	switch (reset_level) {
	case HNS3_IMP_RESET:
		/* IMP reset is requested via a firmware command. */
		hns3_imp_reset_cmd(hw);
		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		break;
	case HNS3_GLOBAL_RESET:
		/* Global reset is requested by setting a register bit. */
		val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
		hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
		hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
		hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		break;
	case HNS3_FUNC_RESET:
		hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		/* schedule again to check later */
		hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
		hns3_schedule_reset(hns);
		break;
	default:
		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
		return;
	}
	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
}
5745
5746 static enum hns3_reset_level
5747 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
5748 {
5749         struct hns3_hw *hw = &hns->hw;
5750         enum hns3_reset_level reset_level = HNS3_NONE_RESET;
5751
5752         /* Return the highest priority reset level amongst all */
5753         if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
5754                 reset_level = HNS3_IMP_RESET;
5755         else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
5756                 reset_level = HNS3_GLOBAL_RESET;
5757         else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
5758                 reset_level = HNS3_FUNC_RESET;
5759         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
5760                 reset_level = HNS3_FLR_RESET;
5761
5762         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
5763                 return HNS3_NONE_RESET;
5764
5765         return reset_level;
5766 }
5767
/*
 * Log IMP error sources (RD poison / CMDQ error) latched in
 * HNS3_VECTOR0_OTER_EN_REG and acknowledge each handled source by
 * clearing its bit and writing the register back.
 */
static void
hns3_record_imp_error(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;

	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
		hns3_warn(hw, "Detected IMP RD poison!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}

	/* reg_val already has the RD-poison bit cleared at this point. */
	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
		hns3_warn(hw, "Detected IMP CMDQ error!");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}
}
5787
/*
 * Kick off the reset selected in hw->reset.level: issue the PF function
 * reset command for a function reset, or record IMP errors and set the
 * IMP reset interrupt bit for an IMP reset. Other levels need no
 * preparation. Returns 0 on success or a negative errno from the
 * command queue.
 */
static int
hns3_prepare_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;
	int ret;

	switch (hw->reset.level) {
	case HNS3_FUNC_RESET:
		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
		if (ret)
			return ret;

		/*
		 * After performaning pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hns3_cmd_init is called.
		 */
		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
		hw->reset.stats.request_cnt++;
		break;
	case HNS3_IMP_RESET:
		hns3_record_imp_error(hns);
		/* Set the IMP reset interrupt bit in the vector0 register. */
		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
		break;
	default:
		break;
	}
	return 0;
}
5821
/* Notify firmware that the PF has finished its reset handling. */
static int
hns3_set_rst_done(struct hns3_hw *hw)
{
	struct hns3_pf_rst_done_cmd *req;
	struct hns3_cmd_desc desc;

	req = (struct hns3_pf_rst_done_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
	return hns3_cmd_send(hw, &desc, 1);
}
5833
/*
 * Quiesce the port ahead of a reset: report link down, cancel the service
 * alarm, switch Rx/Tx to the dummy functions (including on secondary
 * processes) and, if the port was running, disable the queues and stop
 * the datapath. Always returns 0.
 */
static int
hns3_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	hw->mac.link_status = RTE_ETH_LINK_DOWN;
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
		hns3_update_linkstatus_and_event(hw, false);
	}

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	/*
	 * Give in-flight datapath calls time to drain; the wait scales with
	 * the configured queue count.
	 */
	rte_delay_ms(hw->cfg_max_queues);

	rte_spinlock_lock(&hw->lock);
	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3_enable_all_queues(hw, false);
		hns3_do_stop(hns);
		/* Defer freeing the queued mbufs until the reset completes. */
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries
	 */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
5873
/*
 * Counterpart of hns3_stop_service(): after a successful reset, tell
 * firmware the PF is done (IMP/global resets only), restore the real
 * Rx/Tx functions on all processes and, if the port was running,
 * re-enable interrupts and queues. Always returns 0.
 */
static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		/*
		 * This API parent function already hold the hns3_hw.lock, the
		 * hns3_service_handler may report lse, in bonding application
		 * it will call driver's ops which may acquire the hns3_hw.lock
		 * again, thus lead to deadlock.
		 * We defer calls hns3_service_handler to avoid the deadlock.
		 */
		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
				  hns3_service_handler, eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * Enable state of each rxq and txq will be recovered after
		 * reset, so we need to restore them before enable all tqps;
		 */
		hns3_restore_tqp_enable_state(hw);
		/*
		 * When finished the initialization, enable queues to receive
		 * and transmit packets.
		 */
		hns3_enable_all_queues(hw, true);
	}

	return 0;
}
5913
/*
 * Re-program all software-tracked configuration into hardware after a
 * reset: unicast/multicast MAC tables, promiscuous mode, VLAN tables and
 * config, flow director rules, PTP, Rx interrupt mapping, GRO and FEC,
 * then restart the datapath if the port was running. On failure the MAC
 * tables are rolled back and the first error code is returned.
 */
static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_all_fdir_filter(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_ptp(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_fec(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	/* Roll back the MAC tables programmed above (delete entries). */
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}
5975
/*
 * Deferred-work entry point of the reset state machine: recover a
 * possibly lost interrupt, drive any pending hardware reset through
 * hns3_reset_process() (returning early on -EAGAIN while the reset is
 * still in progress), then trigger newly requested reset levels.
 */
static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * The interrupt is not triggered within the delay time.
	 * The interrupt may have been lost. It is necessary to handle
	 * the interrupt to recover from the error.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_DEFERRED) {
		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
				  __ATOMIC_RELAXED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);

	/*
	 * Check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully resetted then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		/* Time the reset pass and warn when it takes too long. */
		hns3_clock_gettime(&tv_start);
		ret = hns3_reset_process(hns, reset_level);
		hns3_clock_gettime(&tv);
		timersub(&tv, &tv_start, &tv_delta);
		msec = hns3_clock_calctime_ms(&tv_delta);
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIu64
				     " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		/* -EAGAIN: reset not finished; this service will be re-run. */
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}
6038
6039 static unsigned int
6040 hns3_get_speed_capa_num(uint16_t device_id)
6041 {
6042         unsigned int num;
6043
6044         switch (device_id) {
6045         case HNS3_DEV_ID_25GE:
6046         case HNS3_DEV_ID_25GE_RDMA:
6047                 num = 2;
6048                 break;
6049         case HNS3_DEV_ID_100G_RDMA_MACSEC:
6050         case HNS3_DEV_ID_200G_RDMA:
6051                 num = 1;
6052                 break;
6053         default:
6054                 num = 0;
6055                 break;
6056         }
6057
6058         return num;
6059 }
6060
/*
 * Fill speed_fec_capa with the FEC capability entries for the device.
 * The indices into speed_fec_capa_tbl are fixed positions in that table
 * (presumably 0:10G, 1:25G, 4:100G, 5:200G -- confirm against the table
 * definition). Returns 0 on success or -ENOTSUP for unknown devices.
 * The caller must size speed_fec_capa per hns3_get_speed_capa_num().
 */
static int
hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
			uint16_t device_id)
{
	switch (device_id) {
	case HNS3_DEV_ID_25GE:
	/* fallthrough */
	case HNS3_DEV_ID_25GE_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;

		/* In HNS3 device, the 25G NIC is compatible with 10G rate */
		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
		break;
	case HNS3_DEV_ID_100G_RDMA_MACSEC:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
		break;
	case HNS3_DEV_ID_200G_RDMA:
		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
6090
/*
 * ethdev fec_get_capability callback. Returns the number of speed/FEC
 * entries supported by the device (filling speed_fec_capa only when the
 * caller supplied a large enough array), or -ENOTSUP for unknown devices.
 */
static int
hns3_fec_get_capability(struct rte_eth_dev *dev,
			struct rte_eth_fec_capa *speed_fec_capa,
			unsigned int num)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t device_id = pci_dev->id.device_id;
	unsigned int capa_num;
	int ret;

	capa_num = hns3_get_speed_capa_num(device_id);
	if (capa_num == 0) {
		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
			 device_id);
		return -ENOTSUP;
	}

	/* Caller only asked for the required array size. */
	if (speed_fec_capa == NULL || num < capa_num)
		return capa_num;

	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
	if (ret)
		return -ENOTSUP;

	return capa_num;
}
6118
/*
 * Query whether FEC auto-negotiation is enabled in hardware. On success
 * *state is non-zero when the AUTO bit is set. Devices linked below 10G
 * do not support the query and always report 0.
 */
static int
get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported
	 * in device of link speed
	 * below 10 Gbps.
	 */
	if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
		*state = 0;
		return 0;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
	req = (struct hns3_config_fec_cmd *)desc.data;
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "get current fec auto state failed, ret = %d",
			 ret);
		return ret;
	}

	/* Extract only the auto-enable bit from the returned mode word. */
	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
	return 0;
}
6148
/*
 * Report the active FEC mode as an RTE_ETH_FEC_MODE_CAPA_MASK value in
 * *fec_capa. When the link is down and auto FEC is enabled, AUTO is
 * reported instead of the configured mode. Returns 0 on success or a
 * negative errno from the command queue.
 */
static int
hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
{
	struct hns3_sfp_info_cmd *resp;
	uint32_t tmp_fec_capa;
	uint8_t auto_state;
	struct hns3_cmd_desc desc;
	int ret;

	/*
	 * If link is down and AUTO is enabled, AUTO is returned, otherwise,
	 * configured FEC mode is returned.
	 * If link is up, current FEC mode is returned.
	 */
	if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
		ret = get_current_fec_auto_state(hw, &auto_state);
		if (ret)
			return ret;

		if (auto_state == 0x1) {
			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
			return 0;
		}
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
	resp = (struct hns3_sfp_info_cmd *)desc.data;
	resp->query_type = HNS3_ACTIVE_QUERY;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
		return ret;
	} else if (ret) {
		hns3_err(hw, "get FEC failed, ret = %d", ret);
		return ret;
	}

	/*
	 * FEC mode order defined in hns3 hardware is inconsistent with
	 * that defined in the ethdev library. So the sequence needs
	 * to be converted.
	 */
	switch (resp->active_fec) {
	case HNS3_HW_FEC_MODE_NOFEC:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	case HNS3_HW_FEC_MODE_BASER:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		break;
	case HNS3_HW_FEC_MODE_RS:
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		break;
	default:
		/* Unknown hardware values are reported as NOFEC. */
		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
		break;
	}

	*fec_capa = tmp_fec_capa;
	return 0;
}
6210
6211 static int
6212 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
6213 {
6214         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6215
6216         return hns3_fec_get_internal(hw, fec_capa);
6217 }
6218
/*
 * Program the requested FEC mode (an RTE_ETH_FEC_MODE_CAPA_MASK value)
 * into the MAC. Unknown modes are silently ignored (return 0) -- callers
 * such as hns3_fec_set() validate the mode against the capability first.
 */
static int
hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
{
	struct hns3_config_fec_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);

	req = (struct hns3_config_fec_cmd *)desc.data;
	switch (mode) {
	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
		break;
	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
		/* AUTO uses a separate enable bit, not the mode field. */
		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
		break;
	default:
		return 0;
	}
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set fec mode failed, ret = %d", ret);

	return ret;
}
6254
6255 static uint32_t
6256 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
6257 {
6258         struct hns3_mac *mac = &hw->mac;
6259         uint32_t cur_capa;
6260
6261         switch (mac->link_speed) {
6262         case RTE_ETH_SPEED_NUM_10G:
6263                 cur_capa = fec_capa[1].capa;
6264                 break;
6265         case RTE_ETH_SPEED_NUM_25G:
6266         case RTE_ETH_SPEED_NUM_100G:
6267         case RTE_ETH_SPEED_NUM_200G:
6268                 cur_capa = fec_capa[0].capa;
6269                 break;
6270         default:
6271                 cur_capa = 0;
6272                 break;
6273         }
6274
6275         return cur_capa;
6276 }
6277
/*
 * Return true when exactly one bit of 'mode' is set.
 *
 * Fix: the previous loop bound was sizeof(mode) (i.e. 4), so only bits
 * 0-3 were inspected and any FEC capability bit at position 4 or above
 * was miscounted. Use a population count over the full 32-bit word.
 */
static bool
is_fec_mode_one_bit_set(uint32_t mode)
{
	return __builtin_popcount(mode) == 1;
}
6290
/*
 * ethdev fec_set callback: check that exactly one FEC mode bit is
 * requested and that it lies within the capability of the current link
 * speed, then program it and remember it (pf->fec_mode) so it can be
 * restored after a reset.
 */
static int
hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
{
#define FEC_CAPA_NUM 2
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct hns3_pf *pf = &hns->pf;

	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
	uint32_t cur_capa;
	uint32_t num = FEC_CAPA_NUM;
	int ret;

	ret = hns3_fec_get_capability(dev, fec_capa, num);
	if (ret < 0)
		return ret;

	/* HNS3 PMD only support one bit set mode, e.g. 0x1, 0x4 */
	if (!is_fec_mode_one_bit_set(mode)) {
		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
			     "FEC mode should be only one bit set", mode);
		return -EINVAL;
	}

	/*
	 * Check whether the configured mode is within the FEC capability.
	 * If not, the configured mode will not be supported.
	 */
	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
	if (!(cur_capa & mode)) {
		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
		return -EINVAL;
	}

	rte_spinlock_lock(&hw->lock);
	ret = hns3_set_fec_hw(hw, mode);
	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	/* Record the mode for hns3_restore_fec() after reset. */
	pf->fec_mode = mode;
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
6337
6338 static int
6339 hns3_restore_fec(struct hns3_hw *hw)
6340 {
6341         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6342         struct hns3_pf *pf = &hns->pf;
6343         uint32_t mode = pf->fec_mode;
6344         int ret;
6345
6346         ret = hns3_set_fec_hw(hw, mode);
6347         if (ret)
6348                 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
6349                          mode, ret);
6350
6351         return ret;
6352 }
6353
6354 static int
6355 hns3_query_dev_fec_info(struct hns3_hw *hw)
6356 {
6357         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6358         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
6359         int ret;
6360
6361         ret = hns3_fec_get_internal(hw, &pf->fec_mode);
6362         if (ret)
6363                 hns3_err(hw, "query device FEC info failed, ret = %d", ret);
6364
6365         return ret;
6366 }
6367
6368 static bool
6369 hns3_optical_module_existed(struct hns3_hw *hw)
6370 {
6371         struct hns3_cmd_desc desc;
6372         bool existed;
6373         int ret;
6374
6375         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
6376         ret = hns3_cmd_send(hw, &desc, 1);
6377         if (ret) {
6378                 hns3_err(hw,
6379                          "fail to get optical module exist state, ret = %d.\n",
6380                          ret);
6381                 return false;
6382         }
6383         existed = !!desc.data[0];
6384
6385         return existed;
6386 }
6387
/*
 * Read up to HNS3_SFP_INFO_MAX_LEN bytes of module EEPROM starting at
 * 'offset' using one chained multi-descriptor firmware command. Returns
 * the number of bytes copied into 'data' (may be less than 'len'), or a
 * negative errno if the command fails.
 */
static int
hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
				uint32_t len, uint8_t *data)
{
#define HNS3_SFP_INFO_CMD_NUM 6
#define HNS3_SFP_INFO_MAX_LEN \
	(HNS3_SFP_INFO_BD0_LEN + \
	(HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
	struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
	struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
	uint16_t read_len;
	uint16_t copy_len;
	int ret;
	int i;

	/* Chain all but the last descriptor with the NEXT flag. */
	for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
					  true);
		if (i < HNS3_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}

	/* The request (offset, length) is carried in the first descriptor. */
	sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
	read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);

	ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
	if (ret) {
		hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
				ret);
		return ret;
	}

	/* The data format in BD0 is different with the others. */
	copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* Remaining bytes come from the follow-up descriptors' data area. */
	for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return (int)read_len;
}
6438
/*
 * ethdev get_module_eeprom callback: fill info->data by repeatedly
 * reading firmware-sized chunks until info->length bytes are gathered.
 * Only supported on fiber ports with a module plugged in.
 */
static int
hns3_get_module_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *info)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	uint32_t offset = info->offset;
	uint32_t len = info->length;
	uint8_t *data = info->data;
	uint32_t read_len = 0;

	if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
		return -ENOTSUP;

	if (!hns3_optical_module_existed(hw)) {
		hns3_err(hw, "fail to read module EEPROM: no module is connected.\n");
		return -EIO;
	}

	while (read_len < len) {
		int ret;
		/* Each call reads at most one firmware command's worth. */
		ret = hns3_get_module_eeprom_data(hw, offset + read_len,
						  len - read_len,
						  data + read_len);
		if (ret < 0)
			return -EIO;
		read_len += ret;
	}

	return 0;
}
6470
/*
 * ethdev get_module_info callback: read the SFF-8024 identifier (and
 * extended identifier) bytes at the start of the module EEPROM and map
 * them to the matching SFF standard and EEPROM length.
 */
static int
hns3_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
#define HNS3_SFF8024_ID_SFP		0x03
#define HNS3_SFF8024_ID_QSFP_8438	0x0c
#define HNS3_SFF8024_ID_QSFP_8436_8636	0x0d
#define HNS3_SFF8024_ID_QSFP28_8636	0x11
#define HNS3_SFF_8636_V1_3		0x03
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
	struct rte_dev_eeprom_info info;
	struct hns3_sfp_type sfp_type;
	int ret;

	/* Read just enough of the EEPROM to identify the module type. */
	memset(&sfp_type, 0, sizeof(sfp_type));
	memset(&info, 0, sizeof(info));
	info.data = (uint8_t *)&sfp_type;
	info.length = sizeof(sfp_type);
	ret = hns3_get_module_eeprom(dev, &info);
	if (ret)
		return ret;

	switch (sfp_type.type) {
	case HNS3_SFF8024_ID_SFP:
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8438:
		modinfo->type = RTE_ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case HNS3_SFF8024_ID_QSFP_8436_8636:
		/* The extended type byte picks between SFF-8436 and SFF-8636. */
		if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
			modinfo->type = RTE_ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = RTE_ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case HNS3_SFF8024_ID_QSFP28_8636:
		modinfo->type = RTE_ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n",
			 sfp_type.type, sfp_type.ext_type);
		return -EINVAL;
	}

	return 0;
}
6524
/*
 * PF ethdev callback table, hooked into the ethdev layer in
 * hns3_dev_init(). Comments below only group the entries; each
 * callback's contract is defined by the ethdev API.
 */
static const struct eth_dev_ops hns3_eth_dev_ops = {
	/* Device lifecycle */
	.dev_configure      = hns3_dev_configure,
	.dev_start          = hns3_dev_start,
	.dev_stop           = hns3_dev_stop,
	.dev_close          = hns3_dev_close,
	/* Rx filtering modes */
	.promiscuous_enable = hns3_dev_promiscuous_enable,
	.promiscuous_disable = hns3_dev_promiscuous_disable,
	.allmulticast_enable  = hns3_dev_allmulticast_enable,
	.allmulticast_disable = hns3_dev_allmulticast_disable,
	.mtu_set            = hns3_dev_mtu_set,
	/* Statistics and extended statistics */
	.stats_get          = hns3_stats_get,
	.stats_reset        = hns3_stats_reset,
	.xstats_get         = hns3_dev_xstats_get,
	.xstats_get_names   = hns3_dev_xstats_get_names,
	.xstats_reset       = hns3_dev_xstats_reset,
	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get          = hns3_dev_infos_get,
	.fw_version_get         = hns3_fw_version_get,
	/* Queue setup and control */
	.rx_queue_setup         = hns3_rx_queue_setup,
	.tx_queue_setup         = hns3_tx_queue_setup,
	.rx_queue_release       = hns3_dev_rx_queue_release,
	.tx_queue_release       = hns3_dev_tx_queue_release,
	.rx_queue_start         = hns3_dev_rx_queue_start,
	.rx_queue_stop          = hns3_dev_rx_queue_stop,
	.tx_queue_start         = hns3_dev_tx_queue_start,
	.tx_queue_stop          = hns3_dev_tx_queue_stop,
	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get           = hns3_rxq_info_get,
	.txq_info_get           = hns3_txq_info_get,
	.rx_burst_mode_get      = hns3_rx_burst_mode_get,
	.tx_burst_mode_get      = hns3_tx_burst_mode_get,
	/* Flow control */
	.flow_ctrl_get          = hns3_flow_ctrl_get,
	.flow_ctrl_set          = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	/* MAC address management */
	.mac_addr_add           = hns3_add_mac_addr,
	.mac_addr_remove        = hns3_remove_mac_addr,
	.mac_addr_set           = hns3_set_default_mac_addr,
	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
	/* Link management */
	.link_update            = hns3_dev_link_update,
	.dev_set_link_up        = hns3_dev_set_link_up,
	.dev_set_link_down      = hns3_dev_set_link_down,
	/* RSS */
	.rss_hash_update        = hns3_dev_rss_hash_update,
	.rss_hash_conf_get      = hns3_dev_rss_hash_conf_get,
	.reta_update            = hns3_dev_rss_reta_update,
	.reta_query             = hns3_dev_rss_reta_query,
	.flow_ops_get           = hns3_dev_flow_ops_get,
	/* VLAN offload and filtering */
	.vlan_filter_set        = hns3_vlan_filter_set,
	.vlan_tpid_set          = hns3_vlan_tpid_set,
	.vlan_offload_set       = hns3_vlan_offload_set,
	.vlan_pvid_set          = hns3_vlan_pvid_set,
	/* Register dump and module EEPROM access */
	.get_reg                = hns3_get_regs,
	.get_module_info        = hns3_get_module_info,
	.get_module_eeprom      = hns3_get_module_eeprom,
	.get_dcb_info           = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
	/* FEC */
	.fec_get_capability     = hns3_fec_get_capability,
	.fec_get                = hns3_fec_get,
	.fec_set                = hns3_fec_set,
	.tm_ops_get             = hns3_tm_ops_get,
	.tx_done_cleanup        = hns3_tx_done_cleanup,
	/* IEEE 1588 (PTP) timestamping */
	.timesync_enable            = hns3_timesync_enable,
	.timesync_disable           = hns3_timesync_disable,
	.timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
	.timesync_adjust_time       = hns3_timesync_adjust_time,
	.timesync_read_time         = hns3_timesync_read_time,
	.timesync_write_time        = hns3_timesync_write_time,
};
6595
/*
 * Callbacks driving the PF through the common reset framework,
 * installed on hw->reset.ops in hns3_dev_init(). The framework
 * invokes them in sequence to stop service, reset the hardware,
 * reinitialise, restore configuration and restart service.
 */
static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service       = hns3_reset_service,
	.stop_service        = hns3_stop_service,
	.prepare_reset       = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev          = hns3_reinit_dev,
	.restore_conf        = hns3_restore_conf,
	.start_service       = hns3_start_service,
};
6605
6606 static void
6607 hns3_init_hw_ops(struct hns3_hw *hw)
6608 {
6609         hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
6610         hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
6611         hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
6612         hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
6613         hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
6614 }
6615
/*
 * Initialise a hns3 PF ethdev: install fast-path and control-path
 * callbacks, set up multi-process support, and — in the primary
 * process only — bring up the PF hardware, MAC addresses and the
 * reset service.
 *
 * Returns 0 on success, a negative error code otherwise; on failure
 * all resources acquired so far are released via the goto chain.
 */
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	hns3_flow_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	eth_dev->rx_queue_count = hns3_rx_queue_count;
	ret = hns3_mp_init(eth_dev);
	if (ret)
		goto err_mp_init;

	/*
	 * Secondary processes share the primary's hardware state: they
	 * only attach the fast-path functions (done above) and set up
	 * Tx push, then return without touching the hardware.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		hns3_tx_push_init(eth_dev);
		return 0;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;
	hns3_parse_devargs(eth_dev);

	/*
	 * Set default max packet size according to the mtu
	 * default value in DPDK frame.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	hns3_init_hw_ops(hw);
	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	ret = hns3_init_mac_addrs(eth_dev);
	if (ret != 0)
		goto err_init_mac_addrs;

	hw->adapter_state = HNS3_NIC_INITIALIZED;

	/*
	 * A reset may have been requested while the device was still
	 * initialising; run it now that initialisation is complete.
	 */
	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
			    SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

	/* Error unwinding: labels release resources in reverse order. */
err_init_mac_addrs:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit(eth_dev);

err_mp_init:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->tx_descriptor_status = NULL;
	return ret;
}
6698
6699 static int
6700 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
6701 {
6702         struct hns3_adapter *hns = eth_dev->data->dev_private;
6703         struct hns3_hw *hw = &hns->hw;
6704
6705         PMD_INIT_FUNC_TRACE();
6706
6707         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
6708                 hns3_mp_uninit(eth_dev);
6709                 return 0;
6710         }
6711
6712         if (hw->adapter_state < HNS3_NIC_CLOSING)
6713                 hns3_dev_close(eth_dev);
6714
6715         hw->adapter_state = HNS3_NIC_REMOVED;
6716         return 0;
6717 }
6718
6719 static int
6720 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
6721                    struct rte_pci_device *pci_dev)
6722 {
6723         return rte_eth_dev_pci_generic_probe(pci_dev,
6724                                              sizeof(struct hns3_adapter),
6725                                              hns3_dev_init);
6726 }
6727
6728 static int
6729 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
6730 {
6731         return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
6732 }
6733
/* Huawei PCI device IDs bound to this PF driver. */
static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, }, /* sentinel */
};
6743
/*
 * PCI driver descriptor: requests BAR mapping and link state change
 * interrupt support from the PCI bus layer.
 */
static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};
6750
/* Register the driver and its PCI ID table with the PCI bus. */
RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
/* Kernel modules that must bind the device before this PMD can use it. */
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
/* Supported devargs, parsed by hns3_parse_devargs(). */
RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
/* Per-component log types: "pmd.net.hns3.init" and ".driver". */
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);