181694bf8cdee59d7273b15b6e12211b20a64063
[dpdk.git] / drivers / net / hns3 / hns3_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <rte_alarm.h>
6 #include <rte_bus_pci.h>
7 #include <ethdev_pci.h>
8 #include <rte_pci.h>
9
10 #include "hns3_ethdev.h"
11 #include "hns3_common.h"
12 #include "hns3_logs.h"
13 #include "hns3_rxtx.h"
14 #include "hns3_intr.h"
15 #include "hns3_regs.h"
16 #include "hns3_dcb.h"
17 #include "hns3_mp.h"
18 #include "hns3_flow.h"
19
20 #define HNS3_SERVICE_INTERVAL           1000000 /* us */
21 #define HNS3_SERVICE_QUICK_INTERVAL     10
22 #define HNS3_INVALID_PVID               0xFFFF
23
24 #define HNS3_FILTER_TYPE_VF             0
25 #define HNS3_FILTER_TYPE_PORT           1
26 #define HNS3_FILTER_FE_EGRESS_V1_B      BIT(0)
27 #define HNS3_FILTER_FE_NIC_INGRESS_B    BIT(0)
28 #define HNS3_FILTER_FE_NIC_EGRESS_B     BIT(1)
29 #define HNS3_FILTER_FE_ROCE_INGRESS_B   BIT(2)
30 #define HNS3_FILTER_FE_ROCE_EGRESS_B    BIT(3)
31 #define HNS3_FILTER_FE_EGRESS           (HNS3_FILTER_FE_NIC_EGRESS_B \
32                                         | HNS3_FILTER_FE_ROCE_EGRESS_B)
33 #define HNS3_FILTER_FE_INGRESS          (HNS3_FILTER_FE_NIC_INGRESS_B \
34                                         | HNS3_FILTER_FE_ROCE_INGRESS_B)
35
36 /* Reset related Registers */
37 #define HNS3_GLOBAL_RESET_BIT           0
38 #define HNS3_CORE_RESET_BIT             1
39 #define HNS3_IMP_RESET_BIT              2
40 #define HNS3_FUN_RST_ING_B              0
41
42 #define HNS3_VECTOR0_IMP_RESET_INT_B    1
43 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B     4U
44 #define HNS3_VECTOR0_IMP_RD_POISON_B    5U
45 #define HNS3_VECTOR0_ALL_MSIX_ERR_B     6U
46
47 #define HNS3_RESET_WAIT_MS      100
48 #define HNS3_RESET_WAIT_CNT     200
49
50 /* FEC mode order defined in HNS3 hardware */
51 #define HNS3_HW_FEC_MODE_NOFEC  0
52 #define HNS3_HW_FEC_MODE_BASER  1
53 #define HNS3_HW_FEC_MODE_RS     2
54
55 enum hns3_evt_cause {
56         HNS3_VECTOR0_EVENT_RST,
57         HNS3_VECTOR0_EVENT_MBX,
58         HNS3_VECTOR0_EVENT_ERR,
59         HNS3_VECTOR0_EVENT_PTP,
60         HNS3_VECTOR0_EVENT_OTHER,
61 };
62
63 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
64         { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
65                              RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
66                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
67
68         { RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
69                              RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
70                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
71                              RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
72
73         { RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
74                              RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
75                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
76
77         { RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
78                              RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
79                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
80                              RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
81
82         { RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
83                               RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
84                               RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
85
86         { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
87                               RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
88                               RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
89 };
90
91 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
92                                                  uint64_t *levels);
93 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
94 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
95                                     int on);
96 static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
97 static bool hns3_update_link_status(struct hns3_hw *hw);
98
99 static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
100                                 struct rte_ether_addr *mac_addr);
101 static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
102                                    struct rte_ether_addr *mac_addr);
103 static int hns3_restore_fec(struct hns3_hw *hw);
104 static int hns3_query_dev_fec_info(struct hns3_hw *hw);
105 static int hns3_do_stop(struct hns3_adapter *hns);
106 static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
107 static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
108
109
110 static void
111 hns3_pf_disable_irq0(struct hns3_hw *hw)
112 {
113         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
114 }
115
116 static void
117 hns3_pf_enable_irq0(struct hns3_hw *hw)
118 {
119         hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
120 }
121
122 static enum hns3_evt_cause
123 hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
124                           uint32_t *vec_val)
125 {
126         struct hns3_hw *hw = &hns->hw;
127
128         __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
129         hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
130         *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
131         if (!is_delay) {
132                 hw->reset.stats.imp_cnt++;
133                 hns3_warn(hw, "IMP reset detected, clear reset status");
134         } else {
135                 hns3_schedule_delayed_reset(hns);
136                 hns3_warn(hw, "IMP reset detected, don't clear reset status");
137         }
138
139         return HNS3_VECTOR0_EVENT_RST;
140 }
141
142 static enum hns3_evt_cause
143 hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
144                              uint32_t *vec_val)
145 {
146         struct hns3_hw *hw = &hns->hw;
147
148         __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
149         hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
150         *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
151         if (!is_delay) {
152                 hw->reset.stats.global_cnt++;
153                 hns3_warn(hw, "Global reset detected, clear reset status");
154         } else {
155                 hns3_schedule_delayed_reset(hns);
156                 hns3_warn(hw,
157                           "Global reset detected, don't clear reset status");
158         }
159
160         return HNS3_VECTOR0_EVENT_RST;
161 }
162
163 static enum hns3_evt_cause
164 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
165 {
166         struct hns3_hw *hw = &hns->hw;
167         uint32_t vector0_int_stats;
168         uint32_t cmdq_src_val;
169         uint32_t hw_err_src_reg;
170         uint32_t val;
171         enum hns3_evt_cause ret;
172         bool is_delay;
173
174         /* fetch the events from their corresponding regs */
175         vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
176         cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
177         hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
178
179         is_delay = clearval == NULL ? true : false;
180         /*
181          * Assumption: If by any chance reset and mailbox events are reported
182          * together then we will only process reset event and defer the
183          * processing of the mailbox events. Since, we would have not cleared
184          * RX CMDQ event this time we would receive again another interrupt
185          * from H/W just for the mailbox.
186          */
187         if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
188                 ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
189                 goto out;
190         }
191
192         /* Global reset */
193         if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
194                 ret = hns3_proc_global_reset_event(hns, is_delay, &val);
195                 goto out;
196         }
197
198         /* Check for vector0 1588 event source */
199         if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
200                 val = BIT(HNS3_VECTOR0_1588_INT_B);
201                 ret = HNS3_VECTOR0_EVENT_PTP;
202                 goto out;
203         }
204
205         /* check for vector0 msix event source */
206         if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
207             hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
208                 val = vector0_int_stats | hw_err_src_reg;
209                 ret = HNS3_VECTOR0_EVENT_ERR;
210                 goto out;
211         }
212
213         /* check for vector0 mailbox(=CMDQ RX) event source */
214         if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
215                 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
216                 val = cmdq_src_val;
217                 ret = HNS3_VECTOR0_EVENT_MBX;
218                 goto out;
219         }
220
221         val = vector0_int_stats;
222         ret = HNS3_VECTOR0_EVENT_OTHER;
223 out:
224
225         if (clearval)
226                 *clearval = val;
227         return ret;
228 }
229
230 static bool
231 hns3_is_1588_event_type(uint32_t event_type)
232 {
233         return (event_type == HNS3_VECTOR0_EVENT_PTP);
234 }
235
236 static void
237 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
238 {
239         if (event_type == HNS3_VECTOR0_EVENT_RST ||
240             hns3_is_1588_event_type(event_type))
241                 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
242         else if (event_type == HNS3_VECTOR0_EVENT_MBX)
243                 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
244 }
245
246 static void
247 hns3_clear_all_event_cause(struct hns3_hw *hw)
248 {
249         uint32_t vector0_int_stats;
250
251         vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
252         if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
253                 hns3_warn(hw, "Probe during IMP reset interrupt");
254
255         if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
256                 hns3_warn(hw, "Probe during Global reset interrupt");
257
258         hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
259                                BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
260                                BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
261                                BIT(HNS3_VECTOR0_CORERESET_INT_B));
262         hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
263         hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
264                                 BIT(HNS3_VECTOR0_1588_INT_B));
265 }
266
267 static void
268 hns3_handle_mac_tnl(struct hns3_hw *hw)
269 {
270         struct hns3_cmd_desc desc;
271         uint32_t status;
272         int ret;
273
274         /* query and clear mac tnl interrupt */
275         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
276         ret = hns3_cmd_send(hw, &desc, 1);
277         if (ret) {
278                 hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
279                 return;
280         }
281
282         status = rte_le_to_cpu_32(desc.data[0]);
283         if (status) {
284                 hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
285                 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
286                                           false);
287                 desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
288                 ret = hns3_cmd_send(hw, &desc, 1);
289                 if (ret)
290                         hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
291                                  ret);
292         }
293 }
294
295 static void
296 hns3_interrupt_handler(void *param)
297 {
298         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
299         struct hns3_adapter *hns = dev->data->dev_private;
300         struct hns3_hw *hw = &hns->hw;
301         enum hns3_evt_cause event_cause;
302         uint32_t clearval = 0;
303         uint32_t vector0_int;
304         uint32_t ras_int;
305         uint32_t cmdq_int;
306
307         /* Disable interrupt */
308         hns3_pf_disable_irq0(hw);
309
310         event_cause = hns3_check_event_cause(hns, &clearval);
311         vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
312         ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
313         cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
314         hns3_clear_event_cause(hw, event_cause, clearval);
315         /* vector 0 interrupt is shared with reset and mailbox source events. */
316         if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
317                 hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
318                           "ras_int_stat:0x%x cmdq_int_stat:0x%x",
319                           vector0_int, ras_int, cmdq_int);
320                 hns3_handle_mac_tnl(hw);
321                 hns3_handle_error(hns);
322         } else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
323                 hns3_warn(hw, "received reset interrupt");
324                 hns3_schedule_reset(hns);
325         } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
326                 hns3_dev_handle_mbx_msg(hw);
327         } else {
328                 hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
329                           "ras_int_stat:0x%x cmdq_int_stat:0x%x",
330                           vector0_int, ras_int, cmdq_int);
331         }
332
333         /* Enable interrupt if it is not cause by reset */
334         hns3_pf_enable_irq0(hw);
335 }
336
337 static int
338 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
339 {
340 #define HNS3_VLAN_ID_OFFSET_STEP        160
341 #define HNS3_VLAN_BYTE_SIZE             8
342         struct hns3_vlan_filter_pf_cfg_cmd *req;
343         struct hns3_hw *hw = &hns->hw;
344         uint8_t vlan_offset_byte_val;
345         struct hns3_cmd_desc desc;
346         uint8_t vlan_offset_byte;
347         uint8_t vlan_offset_base;
348         int ret;
349
350         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
351
352         vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
353         vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
354                            HNS3_VLAN_BYTE_SIZE;
355         vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
356
357         req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
358         req->vlan_offset = vlan_offset_base;
359         req->vlan_cfg = on ? 0 : 1;
360         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
361
362         ret = hns3_cmd_send(hw, &desc, 1);
363         if (ret)
364                 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
365                          vlan_id, ret);
366
367         return ret;
368 }
369
370 static void
371 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
372 {
373         struct hns3_user_vlan_table *vlan_entry;
374         struct hns3_pf *pf = &hns->pf;
375
376         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
377                 if (vlan_entry->vlan_id == vlan_id) {
378                         if (vlan_entry->hd_tbl_status)
379                                 hns3_set_port_vlan_filter(hns, vlan_id, 0);
380                         LIST_REMOVE(vlan_entry, next);
381                         rte_free(vlan_entry);
382                         break;
383                 }
384         }
385 }
386
387 static void
388 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
389                         bool writen_to_tbl)
390 {
391         struct hns3_user_vlan_table *vlan_entry;
392         struct hns3_hw *hw = &hns->hw;
393         struct hns3_pf *pf = &hns->pf;
394
395         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
396                 if (vlan_entry->vlan_id == vlan_id)
397                         return;
398         }
399
400         vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
401         if (vlan_entry == NULL) {
402                 hns3_err(hw, "Failed to malloc hns3 vlan table");
403                 return;
404         }
405
406         vlan_entry->hd_tbl_status = writen_to_tbl;
407         vlan_entry->vlan_id = vlan_id;
408
409         LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
410 }
411
412 static int
413 hns3_restore_vlan_table(struct hns3_adapter *hns)
414 {
415         struct hns3_user_vlan_table *vlan_entry;
416         struct hns3_hw *hw = &hns->hw;
417         struct hns3_pf *pf = &hns->pf;
418         uint16_t vlan_id;
419         int ret = 0;
420
421         if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
422                 return hns3_vlan_pvid_configure(hns,
423                                                 hw->port_base_vlan_cfg.pvid, 1);
424
425         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
426                 if (vlan_entry->hd_tbl_status) {
427                         vlan_id = vlan_entry->vlan_id;
428                         ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
429                         if (ret)
430                                 break;
431                 }
432         }
433
434         return ret;
435 }
436
437 static int
438 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
439 {
440         struct hns3_hw *hw = &hns->hw;
441         bool writen_to_tbl = false;
442         int ret = 0;
443
444         /*
445          * When vlan filter is enabled, hardware regards packets without vlan
446          * as packets with vlan 0. So, to receive packets without vlan, vlan id
447          * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
448          */
449         if (on == 0 && vlan_id == 0)
450                 return 0;
451
452         /*
453          * When port base vlan enabled, we use port base vlan as the vlan
454          * filter condition. In this case, we don't update vlan filter table
455          * when user add new vlan or remove exist vlan, just update the
456          * vlan list. The vlan id in vlan list will be written in vlan filter
457          * table until port base vlan disabled
458          */
459         if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
460                 ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
461                 writen_to_tbl = true;
462         }
463
464         if (ret == 0) {
465                 if (on)
466                         hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
467                 else
468                         hns3_rm_dev_vlan_table(hns, vlan_id);
469         }
470         return ret;
471 }
472
473 static int
474 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
475 {
476         struct hns3_adapter *hns = dev->data->dev_private;
477         struct hns3_hw *hw = &hns->hw;
478         int ret;
479
480         rte_spinlock_lock(&hw->lock);
481         ret = hns3_vlan_filter_configure(hns, vlan_id, on);
482         rte_spinlock_unlock(&hw->lock);
483         return ret;
484 }
485
486 static int
487 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
488                          uint16_t tpid)
489 {
490         struct hns3_rx_vlan_type_cfg_cmd *rx_req;
491         struct hns3_tx_vlan_type_cfg_cmd *tx_req;
492         struct hns3_hw *hw = &hns->hw;
493         struct hns3_cmd_desc desc;
494         int ret;
495
496         if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
497              vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
498                 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
499                 return -EINVAL;
500         }
501
502         if (tpid != RTE_ETHER_TYPE_VLAN) {
503                 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
504                 return -EINVAL;
505         }
506
507         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
508         rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
509
510         if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
511                 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
512                 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
513         } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
514                 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
515                 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
516                 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
517                 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
518         }
519
520         ret = hns3_cmd_send(hw, &desc, 1);
521         if (ret) {
522                 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
523                          ret);
524                 return ret;
525         }
526
527         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
528
529         tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
530         tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
531         tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
532
533         ret = hns3_cmd_send(hw, &desc, 1);
534         if (ret)
535                 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
536                          ret);
537         return ret;
538 }
539
540 static int
541 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
542                    uint16_t tpid)
543 {
544         struct hns3_adapter *hns = dev->data->dev_private;
545         struct hns3_hw *hw = &hns->hw;
546         int ret;
547
548         rte_spinlock_lock(&hw->lock);
549         ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
550         rte_spinlock_unlock(&hw->lock);
551         return ret;
552 }
553
554 static int
555 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
556                              struct hns3_rx_vtag_cfg *vcfg)
557 {
558         struct hns3_vport_vtag_rx_cfg_cmd *req;
559         struct hns3_hw *hw = &hns->hw;
560         struct hns3_cmd_desc desc;
561         uint16_t vport_id;
562         uint8_t bitmap;
563         int ret;
564
565         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
566
567         req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
568         hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
569                      vcfg->strip_tag1_en ? 1 : 0);
570         hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
571                      vcfg->strip_tag2_en ? 1 : 0);
572         hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
573                      vcfg->vlan1_vlan_prionly ? 1 : 0);
574         hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
575                      vcfg->vlan2_vlan_prionly ? 1 : 0);
576
577         /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
578         hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
579                      vcfg->strip_tag1_discard_en ? 1 : 0);
580         hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
581                      vcfg->strip_tag2_discard_en ? 1 : 0);
582         /*
583          * In current version VF is not supported when PF is driven by DPDK
584          * driver, just need to configure parameters for PF vport.
585          */
586         vport_id = HNS3_PF_FUNC_ID;
587         req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
588         bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
589         req->vf_bitmap[req->vf_offset] = bitmap;
590
591         ret = hns3_cmd_send(hw, &desc, 1);
592         if (ret)
593                 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
594         return ret;
595 }
596
597 static void
598 hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
599                            struct hns3_rx_vtag_cfg *vcfg)
600 {
601         struct hns3_pf *pf = &hns->pf;
602         memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
603 }
604
605 static void
606 hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
607                            struct hns3_tx_vtag_cfg *vcfg)
608 {
609         struct hns3_pf *pf = &hns->pf;
610         memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
611 }
612
613 static int
614 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
615 {
616         struct hns3_rx_vtag_cfg rxvlan_cfg;
617         struct hns3_hw *hw = &hns->hw;
618         int ret;
619
620         if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
621                 rxvlan_cfg.strip_tag1_en = false;
622                 rxvlan_cfg.strip_tag2_en = enable;
623                 rxvlan_cfg.strip_tag2_discard_en = false;
624         } else {
625                 rxvlan_cfg.strip_tag1_en = enable;
626                 rxvlan_cfg.strip_tag2_en = true;
627                 rxvlan_cfg.strip_tag2_discard_en = true;
628         }
629
630         rxvlan_cfg.strip_tag1_discard_en = false;
631         rxvlan_cfg.vlan1_vlan_prionly = false;
632         rxvlan_cfg.vlan2_vlan_prionly = false;
633         rxvlan_cfg.rx_vlan_offload_en = enable;
634
635         ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
636         if (ret) {
637                 hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
638                                 enable ? "enable" : "disable", ret);
639                 return ret;
640         }
641
642         hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
643
644         return ret;
645 }
646
647 static int
648 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
649                           uint8_t fe_type, bool filter_en, uint8_t vf_id)
650 {
651         struct hns3_vlan_filter_ctrl_cmd *req;
652         struct hns3_cmd_desc desc;
653         int ret;
654
655         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
656
657         req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
658         req->vlan_type = vlan_type;
659         req->vlan_fe = filter_en ? fe_type : 0;
660         req->vf_id = vf_id;
661
662         ret = hns3_cmd_send(hw, &desc, 1);
663         if (ret)
664                 hns3_err(hw, "set vlan filter fail, ret =%d", ret);
665
666         return ret;
667 }
668
669 static int
670 hns3_vlan_filter_init(struct hns3_adapter *hns)
671 {
672         struct hns3_hw *hw = &hns->hw;
673         int ret;
674
675         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
676                                         HNS3_FILTER_FE_EGRESS, false,
677                                         HNS3_PF_FUNC_ID);
678         if (ret) {
679                 hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
680                 return ret;
681         }
682
683         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
684                                         HNS3_FILTER_FE_INGRESS, false,
685                                         HNS3_PF_FUNC_ID);
686         if (ret)
687                 hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);
688
689         return ret;
690 }
691
692 static int
693 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
694 {
695         struct hns3_hw *hw = &hns->hw;
696         int ret;
697
698         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
699                                         HNS3_FILTER_FE_INGRESS, enable,
700                                         HNS3_PF_FUNC_ID);
701         if (ret)
702                 hns3_err(hw, "failed to %s port vlan filter, ret = %d",
703                          enable ? "enable" : "disable", ret);
704
705         return ret;
706 }
707
708 static int
709 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
710 {
711         struct hns3_adapter *hns = dev->data->dev_private;
712         struct hns3_hw *hw = &hns->hw;
713         struct rte_eth_rxmode *rxmode;
714         unsigned int tmp_mask;
715         bool enable;
716         int ret = 0;
717
718         rte_spinlock_lock(&hw->lock);
719         rxmode = &dev->data->dev_conf.rxmode;
720         tmp_mask = (unsigned int)mask;
721         if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
722                 /* ignore vlan filter configuration during promiscuous mode */
723                 if (!dev->data->promiscuous) {
724                         /* Enable or disable VLAN filter */
725                         enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
726                                  true : false;
727
728                         ret = hns3_enable_vlan_filter(hns, enable);
729                         if (ret) {
730                                 rte_spinlock_unlock(&hw->lock);
731                                 hns3_err(hw, "failed to %s rx filter, ret = %d",
732                                          enable ? "enable" : "disable", ret);
733                                 return ret;
734                         }
735                 }
736         }
737
738         if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
739                 /* Enable or disable VLAN stripping */
740                 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
741                     true : false;
742
743                 ret = hns3_en_hw_strip_rxvtag(hns, enable);
744                 if (ret) {
745                         rte_spinlock_unlock(&hw->lock);
746                         hns3_err(hw, "failed to %s rx strip, ret = %d",
747                                  enable ? "enable" : "disable", ret);
748                         return ret;
749                 }
750         }
751
752         rte_spinlock_unlock(&hw->lock);
753
754         return ret;
755 }
756
757 static int
758 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
759                              struct hns3_tx_vtag_cfg *vcfg)
760 {
761         struct hns3_vport_vtag_tx_cfg_cmd *req;
762         struct hns3_cmd_desc desc;
763         struct hns3_hw *hw = &hns->hw;
764         uint16_t vport_id;
765         uint8_t bitmap;
766         int ret;
767
768         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
769
770         req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
771         req->def_vlan_tag1 = vcfg->default_tag1;
772         req->def_vlan_tag2 = vcfg->default_tag2;
773         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
774                      vcfg->accept_tag1 ? 1 : 0);
775         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
776                      vcfg->accept_untag1 ? 1 : 0);
777         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
778                      vcfg->accept_tag2 ? 1 : 0);
779         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
780                      vcfg->accept_untag2 ? 1 : 0);
781         hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
782                      vcfg->insert_tag1_en ? 1 : 0);
783         hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
784                      vcfg->insert_tag2_en ? 1 : 0);
785         hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
786
787         /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
788         hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
789                      vcfg->tag_shift_mode_en ? 1 : 0);
790
791         /*
792          * In current version VF is not supported when PF is driven by DPDK
793          * driver, just need to configure parameters for PF vport.
794          */
795         vport_id = HNS3_PF_FUNC_ID;
796         req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
797         bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
798         req->vf_bitmap[req->vf_offset] = bitmap;
799
800         ret = hns3_cmd_send(hw, &desc, 1);
801         if (ret)
802                 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
803
804         return ret;
805 }
806
807 static int
808 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
809                      uint16_t pvid)
810 {
811         struct hns3_hw *hw = &hns->hw;
812         struct hns3_tx_vtag_cfg txvlan_cfg;
813         int ret;
814
815         if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
816                 txvlan_cfg.accept_tag1 = true;
817                 txvlan_cfg.insert_tag1_en = false;
818                 txvlan_cfg.default_tag1 = 0;
819         } else {
820                 txvlan_cfg.accept_tag1 =
821                         hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
822                 txvlan_cfg.insert_tag1_en = true;
823                 txvlan_cfg.default_tag1 = pvid;
824         }
825
826         txvlan_cfg.accept_untag1 = true;
827         txvlan_cfg.accept_tag2 = true;
828         txvlan_cfg.accept_untag2 = true;
829         txvlan_cfg.insert_tag2_en = false;
830         txvlan_cfg.default_tag2 = 0;
831         txvlan_cfg.tag_shift_mode_en = true;
832
833         ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
834         if (ret) {
835                 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
836                          ret);
837                 return ret;
838         }
839
840         hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
841         return ret;
842 }
843
844
845 static void
846 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
847 {
848         struct hns3_user_vlan_table *vlan_entry;
849         struct hns3_pf *pf = &hns->pf;
850
851         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
852                 if (vlan_entry->hd_tbl_status) {
853                         hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
854                         vlan_entry->hd_tbl_status = false;
855                 }
856         }
857
858         if (is_del_list) {
859                 vlan_entry = LIST_FIRST(&pf->vlan_list);
860                 while (vlan_entry) {
861                         LIST_REMOVE(vlan_entry, next);
862                         rte_free(vlan_entry);
863                         vlan_entry = LIST_FIRST(&pf->vlan_list);
864                 }
865         }
866 }
867
868 static void
869 hns3_add_all_vlan_table(struct hns3_adapter *hns)
870 {
871         struct hns3_user_vlan_table *vlan_entry;
872         struct hns3_pf *pf = &hns->pf;
873
874         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
875                 if (!vlan_entry->hd_tbl_status) {
876                         hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
877                         vlan_entry->hd_tbl_status = true;
878                 }
879         }
880 }
881
882 static void
883 hns3_remove_all_vlan_table(struct hns3_adapter *hns)
884 {
885         struct hns3_hw *hw = &hns->hw;
886         int ret;
887
888         hns3_rm_all_vlan_table(hns, true);
889         if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
890                 ret = hns3_set_port_vlan_filter(hns,
891                                                 hw->port_base_vlan_cfg.pvid, 0);
892                 if (ret) {
893                         hns3_err(hw, "Failed to remove all vlan table, ret =%d",
894                                  ret);
895                         return;
896                 }
897         }
898 }
899
900 static int
901 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
902                         uint16_t port_base_vlan_state, uint16_t new_pvid)
903 {
904         struct hns3_hw *hw = &hns->hw;
905         uint16_t old_pvid;
906         int ret;
907
908         if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
909                 old_pvid = hw->port_base_vlan_cfg.pvid;
910                 if (old_pvid != HNS3_INVALID_PVID) {
911                         ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
912                         if (ret) {
913                                 hns3_err(hw, "failed to remove old pvid %u, "
914                                                 "ret = %d", old_pvid, ret);
915                                 return ret;
916                         }
917                 }
918
919                 hns3_rm_all_vlan_table(hns, false);
920                 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
921                 if (ret) {
922                         hns3_err(hw, "failed to add new pvid %u, ret = %d",
923                                         new_pvid, ret);
924                         return ret;
925                 }
926         } else {
927                 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
928                 if (ret) {
929                         hns3_err(hw, "failed to remove pvid %u, ret = %d",
930                                         new_pvid, ret);
931                         return ret;
932                 }
933
934                 hns3_add_all_vlan_table(hns);
935         }
936         return 0;
937 }
938
939 static int
940 hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
941 {
942         struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
943         struct hns3_rx_vtag_cfg rx_vlan_cfg;
944         bool rx_strip_en;
945         int ret;
946
947         rx_strip_en = old_cfg->rx_vlan_offload_en;
948         if (on) {
949                 rx_vlan_cfg.strip_tag1_en = rx_strip_en;
950                 rx_vlan_cfg.strip_tag2_en = true;
951                 rx_vlan_cfg.strip_tag2_discard_en = true;
952         } else {
953                 rx_vlan_cfg.strip_tag1_en = false;
954                 rx_vlan_cfg.strip_tag2_en = rx_strip_en;
955                 rx_vlan_cfg.strip_tag2_discard_en = false;
956         }
957         rx_vlan_cfg.strip_tag1_discard_en = false;
958         rx_vlan_cfg.vlan1_vlan_prionly = false;
959         rx_vlan_cfg.vlan2_vlan_prionly = false;
960         rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;
961
962         ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
963         if (ret)
964                 return ret;
965
966         hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
967         return ret;
968 }
969
970 static int
971 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
972 {
973         struct hns3_hw *hw = &hns->hw;
974         uint16_t port_base_vlan_state;
975         int ret, err;
976
977         if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
978                 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
979                         hns3_warn(hw, "Invalid operation! As current pvid set "
980                                   "is %u, disable pvid %u is invalid",
981                                   hw->port_base_vlan_cfg.pvid, pvid);
982                 return 0;
983         }
984
985         port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
986                                     HNS3_PORT_BASE_VLAN_DISABLE;
987         ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
988         if (ret) {
989                 hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
990                          ret);
991                 return ret;
992         }
993
994         ret = hns3_en_pvid_strip(hns, on);
995         if (ret) {
996                 hns3_err(hw, "failed to config rx vlan strip for pvid, "
997                          "ret = %d", ret);
998                 goto pvid_vlan_strip_fail;
999         }
1000
1001         if (pvid == HNS3_INVALID_PVID)
1002                 goto out;
1003         ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
1004         if (ret) {
1005                 hns3_err(hw, "failed to update vlan filter entries, ret = %d",
1006                          ret);
1007                 goto vlan_filter_set_fail;
1008         }
1009
1010 out:
1011         hw->port_base_vlan_cfg.state = port_base_vlan_state;
1012         hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
1013         return ret;
1014
1015 vlan_filter_set_fail:
1016         err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
1017                                         HNS3_PORT_BASE_VLAN_ENABLE);
1018         if (err)
1019                 hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);
1020
1021 pvid_vlan_strip_fail:
1022         err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
1023                                         hw->port_base_vlan_cfg.pvid);
1024         if (err)
1025                 hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);
1026
1027         return ret;
1028 }
1029
1030 static int
1031 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1032 {
1033         struct hns3_adapter *hns = dev->data->dev_private;
1034         struct hns3_hw *hw = &hns->hw;
1035         bool pvid_en_state_change;
1036         uint16_t pvid_state;
1037         int ret;
1038
1039         if (pvid > RTE_ETHER_MAX_VLAN_ID) {
1040                 hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
1041                          RTE_ETHER_MAX_VLAN_ID);
1042                 return -EINVAL;
1043         }
1044
1045         /*
1046          * If PVID configuration state change, should refresh the PVID
1047          * configuration state in struct hns3_tx_queue/hns3_rx_queue.
1048          */
1049         pvid_state = hw->port_base_vlan_cfg.state;
1050         if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
1051             (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
1052                 pvid_en_state_change = false;
1053         else
1054                 pvid_en_state_change = true;
1055
1056         rte_spinlock_lock(&hw->lock);
1057         ret = hns3_vlan_pvid_configure(hns, pvid, on);
1058         rte_spinlock_unlock(&hw->lock);
1059         if (ret)
1060                 return ret;
1061         /*
1062          * Only in HNS3_SW_SHIFT_AND_MODE the PVID related operation in Tx/Rx
1063          * need be processed by PMD driver.
1064          */
1065         if (pvid_en_state_change &&
1066             hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1067                 hns3_update_all_queues_pvid_proc_en(hw);
1068
1069         return 0;
1070 }
1071
1072 static int
1073 hns3_default_vlan_config(struct hns3_adapter *hns)
1074 {
1075         struct hns3_hw *hw = &hns->hw;
1076         int ret;
1077
1078         /*
1079          * When vlan filter is enabled, hardware regards packets without vlan
1080          * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
1081          * table, packets without vlan won't be received. So, add vlan 0 as
1082          * the default vlan.
1083          */
1084         ret = hns3_vlan_filter_configure(hns, 0, 1);
1085         if (ret)
1086                 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
1087         return ret;
1088 }
1089
1090 static int
1091 hns3_init_vlan_config(struct hns3_adapter *hns)
1092 {
1093         struct hns3_hw *hw = &hns->hw;
1094         int ret;
1095
1096         /*
1097          * This function can be called in the initialization and reset process,
1098          * when in reset process, it means that hardware had been reseted
1099          * successfully and we need to restore the hardware configuration to
1100          * ensure that the hardware configuration remains unchanged before and
1101          * after reset.
1102          */
1103         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1104                 hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
1105                 hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
1106         }
1107
1108         ret = hns3_vlan_filter_init(hns);
1109         if (ret) {
1110                 hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
1111                 return ret;
1112         }
1113
1114         ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
1115                                        RTE_ETHER_TYPE_VLAN);
1116         if (ret) {
1117                 hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
1118                 return ret;
1119         }
1120
1121         /*
1122          * When in the reinit dev stage of the reset process, the following
1123          * vlan-related configurations may differ from those at initialization,
1124          * we will restore configurations to hardware in hns3_restore_vlan_table
1125          * and hns3_restore_vlan_conf later.
1126          */
1127         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1128                 ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
1129                 if (ret) {
1130                         hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
1131                         return ret;
1132                 }
1133
1134                 ret = hns3_en_hw_strip_rxvtag(hns, false);
1135                 if (ret) {
1136                         hns3_err(hw, "rx strip configure fail in pf, ret =%d",
1137                                  ret);
1138                         return ret;
1139                 }
1140         }
1141
1142         return hns3_default_vlan_config(hns);
1143 }
1144
1145 static int
1146 hns3_restore_vlan_conf(struct hns3_adapter *hns)
1147 {
1148         struct hns3_pf *pf = &hns->pf;
1149         struct hns3_hw *hw = &hns->hw;
1150         uint64_t offloads;
1151         bool enable;
1152         int ret;
1153
1154         if (!hw->data->promiscuous) {
1155                 /* restore vlan filter states */
1156                 offloads = hw->data->dev_conf.rxmode.offloads;
1157                 enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
1158                 ret = hns3_enable_vlan_filter(hns, enable);
1159                 if (ret) {
1160                         hns3_err(hw, "failed to restore vlan rx filter conf, "
1161                                  "ret = %d", ret);
1162                         return ret;
1163                 }
1164         }
1165
1166         ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
1167         if (ret) {
1168                 hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
1169                 return ret;
1170         }
1171
1172         ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
1173         if (ret)
1174                 hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);
1175
1176         return ret;
1177 }
1178
1179 static int
1180 hns3_dev_configure_vlan(struct rte_eth_dev *dev)
1181 {
1182         struct hns3_adapter *hns = dev->data->dev_private;
1183         struct rte_eth_dev_data *data = dev->data;
1184         struct rte_eth_txmode *txmode;
1185         struct hns3_hw *hw = &hns->hw;
1186         int mask;
1187         int ret;
1188
1189         txmode = &data->dev_conf.txmode;
1190         if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
1191                 hns3_warn(hw,
1192                           "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
1193                           "configuration is not supported! Ignore these two "
1194                           "parameters: hw_vlan_reject_tagged(%u), "
1195                           "hw_vlan_reject_untagged(%u)",
1196                           txmode->hw_vlan_reject_tagged,
1197                           txmode->hw_vlan_reject_untagged);
1198
1199         /* Apply vlan offload setting */
1200         mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
1201         ret = hns3_vlan_offload_set(dev, mask);
1202         if (ret) {
1203                 hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
1204                          ret);
1205                 return ret;
1206         }
1207
1208         /*
1209          * If pvid config is not set in rte_eth_conf, driver needn't to set
1210          * VLAN pvid related configuration to hardware.
1211          */
1212         if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
1213                 return 0;
1214
1215         /* Apply pvid setting */
1216         ret = hns3_vlan_pvid_set(dev, txmode->pvid,
1217                                  txmode->hw_vlan_insert_pvid);
1218         if (ret)
1219                 hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
1220                          txmode->pvid, ret);
1221
1222         return ret;
1223 }
1224
1225 static int
1226 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
1227                 unsigned int tso_mss_max)
1228 {
1229         struct hns3_cfg_tso_status_cmd *req;
1230         struct hns3_cmd_desc desc;
1231         uint16_t tso_mss;
1232
1233         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
1234
1235         req = (struct hns3_cfg_tso_status_cmd *)desc.data;
1236
1237         tso_mss = 0;
1238         hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
1239                        tso_mss_min);
1240         req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
1241
1242         tso_mss = 0;
1243         hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
1244                        tso_mss_max);
1245         req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
1246
1247         return hns3_cmd_send(hw, &desc, 1);
1248 }
1249
1250 static int
1251 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
1252                    uint16_t *allocated_size, bool is_alloc)
1253 {
1254         struct hns3_umv_spc_alc_cmd *req;
1255         struct hns3_cmd_desc desc;
1256         int ret;
1257
1258         req = (struct hns3_umv_spc_alc_cmd *)desc.data;
1259         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
1260         hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
1261         req->space_size = rte_cpu_to_le_32(space_size);
1262
1263         ret = hns3_cmd_send(hw, &desc, 1);
1264         if (ret) {
1265                 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
1266                              is_alloc ? "allocate" : "free", ret);
1267                 return ret;
1268         }
1269
1270         if (is_alloc && allocated_size)
1271                 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
1272
1273         return 0;
1274 }
1275
1276 static int
1277 hns3_init_umv_space(struct hns3_hw *hw)
1278 {
1279         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1280         struct hns3_pf *pf = &hns->pf;
1281         uint16_t allocated_size = 0;
1282         int ret;
1283
1284         ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
1285                                  true);
1286         if (ret)
1287                 return ret;
1288
1289         if (allocated_size < pf->wanted_umv_size)
1290                 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
1291                              pf->wanted_umv_size, allocated_size);
1292
1293         pf->max_umv_size = (!!allocated_size) ? allocated_size :
1294                                                 pf->wanted_umv_size;
1295         pf->used_umv_size = 0;
1296         return 0;
1297 }
1298
1299 static int
1300 hns3_uninit_umv_space(struct hns3_hw *hw)
1301 {
1302         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1303         struct hns3_pf *pf = &hns->pf;
1304         int ret;
1305
1306         if (pf->max_umv_size == 0)
1307                 return 0;
1308
1309         ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
1310         if (ret)
1311                 return ret;
1312
1313         pf->max_umv_size = 0;
1314
1315         return 0;
1316 }
1317
1318 static bool
1319 hns3_is_umv_space_full(struct hns3_hw *hw)
1320 {
1321         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1322         struct hns3_pf *pf = &hns->pf;
1323         bool is_full;
1324
1325         is_full = (pf->used_umv_size >= pf->max_umv_size);
1326
1327         return is_full;
1328 }
1329
1330 static void
1331 hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
1332 {
1333         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1334         struct hns3_pf *pf = &hns->pf;
1335
1336         if (is_free) {
1337                 if (pf->used_umv_size > 0)
1338                         pf->used_umv_size--;
1339         } else
1340                 pf->used_umv_size++;
1341 }
1342
1343 static void
1344 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
1345                       const uint8_t *addr, bool is_mc)
1346 {
1347         const unsigned char *mac_addr = addr;
1348         uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
1349                             ((uint32_t)mac_addr[2] << 16) |
1350                             ((uint32_t)mac_addr[1] << 8) |
1351                             (uint32_t)mac_addr[0];
1352         uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
1353
1354         hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1355         if (is_mc) {
1356                 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1357                 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
1358                 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1359         }
1360
1361         new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
1362         new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
1363 }
1364
1365 static int
1366 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
1367                              uint8_t resp_code,
1368                              enum hns3_mac_vlan_tbl_opcode op)
1369 {
1370         if (cmdq_resp) {
1371                 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
1372                          cmdq_resp);
1373                 return -EIO;
1374         }
1375
1376         if (op == HNS3_MAC_VLAN_ADD) {
1377                 if (resp_code == 0 || resp_code == 1) {
1378                         return 0;
1379                 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
1380                         hns3_err(hw, "add mac addr failed for uc_overflow");
1381                         return -ENOSPC;
1382                 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
1383                         hns3_err(hw, "add mac addr failed for mc_overflow");
1384                         return -ENOSPC;
1385                 }
1386
1387                 hns3_err(hw, "add mac addr failed for undefined, code=%u",
1388                          resp_code);
1389                 return -EIO;
1390         } else if (op == HNS3_MAC_VLAN_REMOVE) {
1391                 if (resp_code == 0) {
1392                         return 0;
1393                 } else if (resp_code == 1) {
1394                         hns3_dbg(hw, "remove mac addr failed for miss");
1395                         return -ENOENT;
1396                 }
1397
1398                 hns3_err(hw, "remove mac addr failed for undefined, code=%u",
1399                          resp_code);
1400                 return -EIO;
1401         } else if (op == HNS3_MAC_VLAN_LKUP) {
1402                 if (resp_code == 0) {
1403                         return 0;
1404                 } else if (resp_code == 1) {
1405                         hns3_dbg(hw, "lookup mac addr failed for miss");
1406                         return -ENOENT;
1407                 }
1408
1409                 hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
1410                          resp_code);
1411                 return -EIO;
1412         }
1413
1414         hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
1415                  op);
1416
1417         return -EINVAL;
1418 }
1419
1420 static int
1421 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
1422                          struct hns3_mac_vlan_tbl_entry_cmd *req,
1423                          struct hns3_cmd_desc *desc, uint8_t desc_num)
1424 {
1425         uint8_t resp_code;
1426         uint16_t retval;
1427         int ret;
1428         int i;
1429
1430         if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
1431                 for (i = 0; i < desc_num - 1; i++) {
1432                         hns3_cmd_setup_basic_desc(&desc[i],
1433                                                   HNS3_OPC_MAC_VLAN_ADD, true);
1434                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1435                         if (i == 0)
1436                                 memcpy(desc[i].data, req,
1437                                 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1438                 }
1439                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
1440                                           true);
1441         } else {
1442                 hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
1443                                           true);
1444                 memcpy(desc[0].data, req,
1445                        sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1446         }
1447         ret = hns3_cmd_send(hw, desc, desc_num);
1448         if (ret) {
1449                 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
1450                          ret);
1451                 return ret;
1452         }
1453         resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1454         retval = rte_le_to_cpu_16(desc[0].retval);
1455
1456         return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1457                                             HNS3_MAC_VLAN_LKUP);
1458 }
1459
1460 static int
1461 hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
1462                       struct hns3_mac_vlan_tbl_entry_cmd *req,
1463                       struct hns3_cmd_desc *desc, uint8_t desc_num)
1464 {
1465         uint8_t resp_code;
1466         uint16_t retval;
1467         int cfg_status;
1468         int ret;
1469         int i;
1470
1471         if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
1472                 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
1473                 memcpy(desc->data, req,
1474                        sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1475                 ret = hns3_cmd_send(hw, desc, desc_num);
1476                 resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
1477                 retval = rte_le_to_cpu_16(desc->retval);
1478
1479                 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1480                                                           HNS3_MAC_VLAN_ADD);
1481         } else {
1482                 for (i = 0; i < desc_num; i++) {
1483                         hns3_cmd_reuse_desc(&desc[i], false);
1484                         if (i == desc_num - 1)
1485                                 desc[i].flag &=
1486                                         rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
1487                         else
1488                                 desc[i].flag |=
1489                                         rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1490                 }
1491                 memcpy(desc[0].data, req,
1492                        sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1493                 desc[0].retval = 0;
1494                 ret = hns3_cmd_send(hw, desc, desc_num);
1495                 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1496                 retval = rte_le_to_cpu_16(desc[0].retval);
1497
1498                 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1499                                                           HNS3_MAC_VLAN_ADD);
1500         }
1501
1502         if (ret) {
1503                 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
1504                 return ret;
1505         }
1506
1507         return cfg_status;
1508 }
1509
1510 static int
1511 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1512                          struct hns3_mac_vlan_tbl_entry_cmd *req)
1513 {
1514         struct hns3_cmd_desc desc;
1515         uint8_t resp_code;
1516         uint16_t retval;
1517         int ret;
1518
1519         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1520
1521         memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1522
1523         ret = hns3_cmd_send(hw, &desc, 1);
1524         if (ret) {
1525                 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1526                 return ret;
1527         }
1528         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1529         retval = rte_le_to_cpu_16(desc.retval);
1530
1531         return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1532                                             HNS3_MAC_VLAN_REMOVE);
1533 }
1534
1535 static int
1536 hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1537 {
1538         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1539         struct hns3_mac_vlan_tbl_entry_cmd req;
1540         struct hns3_pf *pf = &hns->pf;
1541         struct hns3_cmd_desc desc;
1542         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1543         uint16_t egress_port = 0;
1544         uint8_t vf_id;
1545         int ret;
1546
1547         /* check if mac addr is valid */
1548         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1549                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1550                                       mac_addr);
1551                 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
1552                          mac_str);
1553                 return -EINVAL;
1554         }
1555
1556         memset(&req, 0, sizeof(req));
1557
1558         /*
1559          * In current version VF is not supported when PF is driven by DPDK
1560          * driver, just need to configure parameters for PF vport.
1561          */
1562         vf_id = HNS3_PF_FUNC_ID;
1563         hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
1564                        HNS3_MAC_EPORT_VFID_S, vf_id);
1565
1566         req.egress_port = rte_cpu_to_le_16(egress_port);
1567
1568         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1569
1570         /*
1571          * Lookup the mac address in the mac_vlan table, and add
1572          * it if the entry is inexistent. Repeated unicast entry
1573          * is not allowed in the mac vlan table.
1574          */
1575         ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
1576                                         HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
1577         if (ret == -ENOENT) {
1578                 if (!hns3_is_umv_space_full(hw)) {
1579                         ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
1580                                                 HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
1581                         if (!ret)
1582                                 hns3_update_umv_space(hw, false);
1583                         return ret;
1584                 }
1585
1586                 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
1587
1588                 return -ENOSPC;
1589         }
1590
1591         hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
1592
1593         /* check if we just hit the duplicate */
1594         if (ret == 0) {
1595                 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
1596                 return 0;
1597         }
1598
1599         hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
1600                  mac_str);
1601
1602         return ret;
1603 }
1604
1605 static int
1606 hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1607 {
1608         struct hns3_mac_vlan_tbl_entry_cmd req;
1609         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1610         int ret;
1611
1612         /* check if mac addr is valid */
1613         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1614                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1615                                       mac_addr);
1616                 hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
1617                          mac_str);
1618                 return -EINVAL;
1619         }
1620
1621         memset(&req, 0, sizeof(req));
1622         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1623         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1624         ret = hns3_remove_mac_vlan_tbl(hw, &req);
1625         if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
1626                 return 0;
1627         else if (ret == 0)
1628                 hns3_update_umv_space(hw, true);
1629
1630         return ret;
1631 }
1632
1633 static int
1634 hns3_set_default_mac_addr(struct rte_eth_dev *dev,
1635                           struct rte_ether_addr *mac_addr)
1636 {
1637         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1638         struct rte_ether_addr *oaddr;
1639         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1640         int ret, ret_val;
1641
1642         rte_spinlock_lock(&hw->lock);
1643         oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
1644         ret = hw->ops.del_uc_mac_addr(hw, oaddr);
1645         if (ret) {
1646                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1647                                       oaddr);
1648                 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
1649                           mac_str, ret);
1650
1651                 rte_spinlock_unlock(&hw->lock);
1652                 return ret;
1653         }
1654
1655         ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
1656         if (ret) {
1657                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1658                                       mac_addr);
1659                 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
1660                 goto err_add_uc_addr;
1661         }
1662
1663         ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
1664         if (ret) {
1665                 hns3_err(hw, "Failed to configure mac pause address: %d", ret);
1666                 goto err_pause_addr_cfg;
1667         }
1668
1669         rte_ether_addr_copy(mac_addr,
1670                             (struct rte_ether_addr *)hw->mac.mac_addr);
1671         rte_spinlock_unlock(&hw->lock);
1672
1673         return 0;
1674
1675 err_pause_addr_cfg:
1676         ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
1677         if (ret_val) {
1678                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1679                                       mac_addr);
1680                 hns3_warn(hw,
1681                           "Failed to roll back to del setted mac addr(%s): %d",
1682                           mac_str, ret_val);
1683         }
1684
1685 err_add_uc_addr:
1686         ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
1687         if (ret_val) {
1688                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
1689                 hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
1690                                   mac_str, ret_val);
1691         }
1692         rte_spinlock_unlock(&hw->lock);
1693
1694         return ret;
1695 }
1696
1697 static void
1698 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1699 {
1700 #define HNS3_VF_NUM_IN_FIRST_DESC 192
1701         uint8_t word_num;
1702         uint8_t bit_num;
1703
1704         if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1705                 word_num = vfid / 32;
1706                 bit_num = vfid % 32;
1707                 if (clr)
1708                         desc[1].data[word_num] &=
1709                             rte_cpu_to_le_32(~(1UL << bit_num));
1710                 else
1711                         desc[1].data[word_num] |=
1712                             rte_cpu_to_le_32(1UL << bit_num);
1713         } else {
1714                 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
1715                 bit_num = vfid % 32;
1716                 if (clr)
1717                         desc[2].data[word_num] &=
1718                             rte_cpu_to_le_32(~(1UL << bit_num));
1719                 else
1720                         desc[2].data[word_num] |=
1721                             rte_cpu_to_le_32(1UL << bit_num);
1722         }
1723 }
1724
1725 static int
1726 hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1727 {
1728         struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
1729         struct hns3_mac_vlan_tbl_entry_cmd req;
1730         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1731         uint8_t vf_id;
1732         int ret;
1733
1734         /* Check if mac addr is valid */
1735         if (!rte_is_multicast_ether_addr(mac_addr)) {
1736                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1737                                       mac_addr);
1738                 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
1739                          mac_str);
1740                 return -EINVAL;
1741         }
1742
1743         memset(&req, 0, sizeof(req));
1744         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1745         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1746         ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1747                                         HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1748         if (ret) {
1749                 /* This mac addr do not exist, add new entry for it */
1750                 memset(desc[0].data, 0, sizeof(desc[0].data));
1751                 memset(desc[1].data, 0, sizeof(desc[0].data));
1752                 memset(desc[2].data, 0, sizeof(desc[0].data));
1753         }
1754
1755         /*
1756          * In current version VF is not supported when PF is driven by DPDK
1757          * driver, just need to configure parameters for PF vport.
1758          */
1759         vf_id = HNS3_PF_FUNC_ID;
1760         hns3_update_desc_vfid(desc, vf_id, false);
1761         ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
1762                                         HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1763         if (ret) {
1764                 if (ret == -ENOSPC)
1765                         hns3_err(hw, "mc mac vlan table is full");
1766                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1767                                       mac_addr);
1768                 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
1769         }
1770
1771         return ret;
1772 }
1773
1774 static int
1775 hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1776 {
1777         struct hns3_mac_vlan_tbl_entry_cmd req;
1778         struct hns3_cmd_desc desc[3];
1779         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1780         uint8_t vf_id;
1781         int ret;
1782
1783         /* Check if mac addr is valid */
1784         if (!rte_is_multicast_ether_addr(mac_addr)) {
1785                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1786                                       mac_addr);
1787                 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
1788                          mac_str);
1789                 return -EINVAL;
1790         }
1791
1792         memset(&req, 0, sizeof(req));
1793         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1794         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1795         ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1796                                         HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1797         if (ret == 0) {
1798                 /*
1799                  * This mac addr exist, remove this handle's VFID for it.
1800                  * In current version VF is not supported when PF is driven by
1801                  * DPDK driver, just need to configure parameters for PF vport.
1802                  */
1803                 vf_id = HNS3_PF_FUNC_ID;
1804                 hns3_update_desc_vfid(desc, vf_id, true);
1805
1806                 /* All the vfid is zero, so need to delete this entry */
1807                 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1808         } else if (ret == -ENOENT) {
1809                 /* This mac addr doesn't exist. */
1810                 return 0;
1811         }
1812
1813         if (ret) {
1814                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1815                                       mac_addr);
1816                 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
1817         }
1818
1819         return ret;
1820 }
1821
1822 static int
1823 hns3_check_mq_mode(struct rte_eth_dev *dev)
1824 {
1825         enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1826         enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1827         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1828         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1829         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1830         struct rte_eth_dcb_tx_conf *dcb_tx_conf;
1831         uint8_t num_tc;
1832         int max_tc = 0;
1833         int i;
1834
1835         if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
1836             (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
1837              tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
1838                 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
1839                          rx_mq_mode, tx_mq_mode);
1840                 return -EOPNOTSUPP;
1841         }
1842
1843         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1844         dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
1845         if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
1846                 if (dcb_rx_conf->nb_tcs > pf->tc_max) {
1847                         hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
1848                                  dcb_rx_conf->nb_tcs, pf->tc_max);
1849                         return -EINVAL;
1850                 }
1851
1852                 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
1853                       dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
1854                         hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
1855                                  "nb_tcs(%d) != %d or %d in rx direction.",
1856                                  dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
1857                         return -EINVAL;
1858                 }
1859
1860                 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
1861                         hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
1862                                  dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
1863                         return -EINVAL;
1864                 }
1865
1866                 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1867                         if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
1868                                 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
1869                                          "is not equal to one in tx direction.",
1870                                          i, dcb_rx_conf->dcb_tc[i]);
1871                                 return -EINVAL;
1872                         }
1873                         if (dcb_rx_conf->dcb_tc[i] > max_tc)
1874                                 max_tc = dcb_rx_conf->dcb_tc[i];
1875                 }
1876
1877                 num_tc = max_tc + 1;
1878                 if (num_tc > dcb_rx_conf->nb_tcs) {
1879                         hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
1880                                  num_tc, dcb_rx_conf->nb_tcs);
1881                         return -EINVAL;
1882                 }
1883         }
1884
1885         return 0;
1886 }
1887
1888 static int
1889 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
1890                            enum hns3_ring_type queue_type, uint16_t queue_id)
1891 {
1892         struct hns3_cmd_desc desc;
1893         struct hns3_ctrl_vector_chain_cmd *req =
1894                 (struct hns3_ctrl_vector_chain_cmd *)desc.data;
1895         enum hns3_opcode_type op;
1896         uint16_t tqp_type_and_id = 0;
1897         uint16_t type;
1898         uint16_t gl;
1899         int ret;
1900
1901         op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
1902         hns3_cmd_setup_basic_desc(&desc, op, false);
1903         req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
1904                                               HNS3_TQP_INT_ID_L_S);
1905         req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
1906                                               HNS3_TQP_INT_ID_H_S);
1907
1908         if (queue_type == HNS3_RING_TYPE_RX)
1909                 gl = HNS3_RING_GL_RX;
1910         else
1911                 gl = HNS3_RING_GL_TX;
1912
1913         type = queue_type;
1914
1915         hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
1916                        type);
1917         hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
1918         hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
1919                        gl);
1920         req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
1921         req->int_cause_num = 1;
1922         ret = hns3_cmd_send(hw, &desc, 1);
1923         if (ret) {
1924                 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
1925                          en ? "Map" : "Unmap", queue_id, vector_id, ret);
1926                 return ret;
1927         }
1928
1929         return 0;
1930 }
1931
1932 static int
1933 hns3_init_ring_with_vector(struct hns3_hw *hw)
1934 {
1935         uint16_t vec;
1936         int ret;
1937         int i;
1938
1939         /*
1940          * In hns3 network engine, vector 0 is always the misc interrupt of this
1941          * function, vector 1~N can be used respectively for the queues of the
1942          * function. Tx and Rx queues with the same number share the interrupt
1943          * vector. In the initialization clearing the all hardware mapping
1944          * relationship configurations between queues and interrupt vectors is
1945          * needed, so some error caused by the residual configurations, such as
1946          * the unexpected Tx interrupt, can be avoid.
1947          */
1948         vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
1949         if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
1950                 vec = vec - 1; /* the last interrupt is reserved */
1951         hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
1952         for (i = 0; i < hw->intr_tqps_num; i++) {
1953                 /*
1954                  * Set gap limiter/rate limiter/quanity limiter algorithm
1955                  * configuration for interrupt coalesce of queue's interrupt.
1956                  */
1957                 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
1958                                        HNS3_TQP_INTR_GL_DEFAULT);
1959                 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
1960                                        HNS3_TQP_INTR_GL_DEFAULT);
1961                 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
1962                 /*
1963                  * QL(quantity limiter) is not used currently, just set 0 to
1964                  * close it.
1965                  */
1966                 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
1967
1968                 ret = hns3_bind_ring_with_vector(hw, vec, false,
1969                                                  HNS3_RING_TYPE_TX, i);
1970                 if (ret) {
1971                         PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
1972                                           "vector: %u, ret=%d", i, vec, ret);
1973                         return ret;
1974                 }
1975
1976                 ret = hns3_bind_ring_with_vector(hw, vec, false,
1977                                                  HNS3_RING_TYPE_RX, i);
1978                 if (ret) {
1979                         PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
1980                                           "vector: %u, ret=%d", i, vec, ret);
1981                         return ret;
1982                 }
1983         }
1984
1985         return 0;
1986 }
1987
1988 static int
1989 hns3_setup_dcb(struct rte_eth_dev *dev)
1990 {
1991         struct hns3_adapter *hns = dev->data->dev_private;
1992         struct hns3_hw *hw = &hns->hw;
1993         int ret;
1994
1995         if (!hns3_dev_get_support(hw, DCB)) {
1996                 hns3_err(hw, "this port does not support dcb configurations.");
1997                 return -EOPNOTSUPP;
1998         }
1999
2000         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
2001                 hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
2002                 return -EOPNOTSUPP;
2003         }
2004
2005         ret = hns3_dcb_configure(hns);
2006         if (ret)
2007                 hns3_err(hw, "failed to config dcb: %d", ret);
2008
2009         return ret;
2010 }
2011
2012 static int
2013 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
2014 {
2015         int ret;
2016
2017         /*
2018          * Some hardware doesn't support auto-negotiation, but users may not
2019          * configure link_speeds (default 0), which means auto-negotiation.
2020          * In this case, it should return success.
2021          */
2022         if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
2023             hw->mac.support_autoneg == 0)
2024                 return 0;
2025
2026         if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
2027                 ret = hns3_check_port_speed(hw, link_speeds);
2028                 if (ret)
2029                         return ret;
2030         }
2031
2032         return 0;
2033 }
2034
2035 static int
2036 hns3_check_dev_conf(struct rte_eth_dev *dev)
2037 {
2038         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2039         struct rte_eth_conf *conf = &dev->data->dev_conf;
2040         int ret;
2041
2042         ret = hns3_check_mq_mode(dev);
2043         if (ret)
2044                 return ret;
2045
2046         return hns3_check_link_speed(hw, conf->link_speeds);
2047 }
2048
2049 static int
2050 hns3_dev_configure(struct rte_eth_dev *dev)
2051 {
2052         struct hns3_adapter *hns = dev->data->dev_private;
2053         struct rte_eth_conf *conf = &dev->data->dev_conf;
2054         enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
2055         struct hns3_hw *hw = &hns->hw;
2056         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2057         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2058         struct rte_eth_rss_conf rss_conf;
2059         bool gro_en;
2060         int ret;
2061
2062         hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
2063
2064         /*
2065          * Some versions of hardware network engine does not support
2066          * individually enable/disable/reset the Tx or Rx queue. These devices
2067          * must enable/disable/reset Tx and Rx queues at the same time. When the
2068          * numbers of Tx queues allocated by upper applications are not equal to
2069          * the numbers of Rx queues, driver needs to setup fake Tx or Rx queues
2070          * to adjust numbers of Tx/Rx queues. otherwise, network engine can not
2071          * work as usual. But these fake queues are imperceptible, and can not
2072          * be used by upper applications.
2073          */
2074         ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
2075         if (ret) {
2076                 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
2077                 hw->cfg_max_queues = 0;
2078                 return ret;
2079         }
2080
2081         hw->adapter_state = HNS3_NIC_CONFIGURING;
2082         ret = hns3_check_dev_conf(dev);
2083         if (ret)
2084                 goto cfg_err;
2085
2086         if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
2087                 ret = hns3_setup_dcb(dev);
2088                 if (ret)
2089                         goto cfg_err;
2090         }
2091
2092         /* When RSS is not configured, redirect the packet queue 0 */
2093         if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
2094                 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2095                 rss_conf = conf->rx_adv_conf.rss_conf;
2096                 hw->rss_dis_flag = false;
2097                 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
2098                 if (ret)
2099                         goto cfg_err;
2100         }
2101
2102         ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
2103         if (ret != 0)
2104                 goto cfg_err;
2105
2106         ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
2107         if (ret)
2108                 goto cfg_err;
2109
2110         ret = hns3_dev_configure_vlan(dev);
2111         if (ret)
2112                 goto cfg_err;
2113
2114         /* config hardware GRO */
2115         gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
2116         ret = hns3_config_gro(hw, gro_en);
2117         if (ret)
2118                 goto cfg_err;
2119
2120         hns3_init_rx_ptype_tble(dev);
2121         hw->adapter_state = HNS3_NIC_CONFIGURED;
2122
2123         return 0;
2124
2125 cfg_err:
2126         hw->cfg_max_queues = 0;
2127         (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
2128         hw->adapter_state = HNS3_NIC_INITIALIZED;
2129
2130         return ret;
2131 }
2132
2133 static int
2134 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
2135 {
2136         struct hns3_config_max_frm_size_cmd *req;
2137         struct hns3_cmd_desc desc;
2138
2139         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
2140
2141         req = (struct hns3_config_max_frm_size_cmd *)desc.data;
2142         req->max_frm_size = rte_cpu_to_le_16(new_mps);
2143         req->min_frm_size = RTE_ETHER_MIN_LEN;
2144
2145         return hns3_cmd_send(hw, &desc, 1);
2146 }
2147
2148 static int
2149 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
2150 {
2151         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2152         uint16_t original_mps = hns->pf.mps;
2153         int err;
2154         int ret;
2155
2156         ret = hns3_set_mac_mtu(hw, mps);
2157         if (ret) {
2158                 hns3_err(hw, "failed to set mtu, ret = %d", ret);
2159                 return ret;
2160         }
2161
2162         hns->pf.mps = mps;
2163         ret = hns3_buffer_alloc(hw);
2164         if (ret) {
2165                 hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
2166                 goto rollback;
2167         }
2168
2169         return 0;
2170
2171 rollback:
2172         err = hns3_set_mac_mtu(hw, original_mps);
2173         if (err) {
2174                 hns3_err(hw, "fail to rollback MTU, err = %d", err);
2175                 return ret;
2176         }
2177         hns->pf.mps = original_mps;
2178
2179         return ret;
2180 }
2181
2182 static int
2183 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2184 {
2185         struct hns3_adapter *hns = dev->data->dev_private;
2186         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
2187         struct hns3_hw *hw = &hns->hw;
2188         int ret;
2189
2190         if (dev->data->dev_started) {
2191                 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
2192                          "before configuration", dev->data->port_id);
2193                 return -EBUSY;
2194         }
2195
2196         rte_spinlock_lock(&hw->lock);
2197         frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
2198
2199         /*
2200          * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely
2201          * assign to "uint16_t" type variable.
2202          */
2203         ret = hns3_config_mtu(hw, (uint16_t)frame_size);
2204         if (ret) {
2205                 rte_spinlock_unlock(&hw->lock);
2206                 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
2207                          dev->data->port_id, mtu, ret);
2208                 return ret;
2209         }
2210
2211         rte_spinlock_unlock(&hw->lock);
2212
2213         return 0;
2214 }
2215
2216 static uint32_t
2217 hns3_get_copper_port_speed_capa(uint32_t supported_speed)
2218 {
2219         uint32_t speed_capa = 0;
2220
2221         if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
2222                 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
2223         if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
2224                 speed_capa |= RTE_ETH_LINK_SPEED_10M;
2225         if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
2226                 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
2227         if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
2228                 speed_capa |= RTE_ETH_LINK_SPEED_100M;
2229         if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
2230                 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2231
2232         return speed_capa;
2233 }
2234
2235 static uint32_t
2236 hns3_get_firber_port_speed_capa(uint32_t supported_speed)
2237 {
2238         uint32_t speed_capa = 0;
2239
2240         if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
2241                 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2242         if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
2243                 speed_capa |= RTE_ETH_LINK_SPEED_10G;
2244         if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
2245                 speed_capa |= RTE_ETH_LINK_SPEED_25G;
2246         if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
2247                 speed_capa |= RTE_ETH_LINK_SPEED_40G;
2248         if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
2249                 speed_capa |= RTE_ETH_LINK_SPEED_50G;
2250         if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
2251                 speed_capa |= RTE_ETH_LINK_SPEED_100G;
2252         if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
2253                 speed_capa |= RTE_ETH_LINK_SPEED_200G;
2254
2255         return speed_capa;
2256 }
2257
2258 static uint32_t
2259 hns3_get_speed_capa(struct hns3_hw *hw)
2260 {
2261         struct hns3_mac *mac = &hw->mac;
2262         uint32_t speed_capa;
2263
2264         if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
2265                 speed_capa =
2266                         hns3_get_copper_port_speed_capa(mac->supported_speed);
2267         else
2268                 speed_capa =
2269                         hns3_get_firber_port_speed_capa(mac->supported_speed);
2270
2271         if (mac->support_autoneg == 0)
2272                 speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
2273
2274         return speed_capa;
2275 }
2276
2277 int
2278 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
2279 {
2280         struct hns3_adapter *hns = eth_dev->data->dev_private;
2281         struct hns3_hw *hw = &hns->hw;
2282         uint16_t queue_num = hw->tqps_num;
2283
2284         /*
2285          * In interrupt mode, 'max_rx_queues' is set based on the number of
2286          * MSI-X interrupt resources of the hardware.
2287          */
2288         if (hw->data->dev_conf.intr_conf.rxq == 1)
2289                 queue_num = hw->intr_tqps_num;
2290
2291         info->max_rx_queues = queue_num;
2292         info->max_tx_queues = hw->tqps_num;
2293         info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
2294         info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
2295         info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
2296         info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
2297         info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
2298         info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
2299                                  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2300                                  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2301                                  RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
2302                                  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2303                                  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
2304                                  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
2305                                  RTE_ETH_RX_OFFLOAD_SCATTER |
2306                                  RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
2307                                  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
2308                                  RTE_ETH_RX_OFFLOAD_RSS_HASH |
2309                                  RTE_ETH_RX_OFFLOAD_TCP_LRO);
2310         info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2311                                  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
2312                                  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
2313                                  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2314                                  RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
2315                                  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
2316                                  RTE_ETH_TX_OFFLOAD_TCP_TSO |
2317                                  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
2318                                  RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
2319                                  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
2320                                  RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
2321                                  hns3_txvlan_cap_get(hw));
2322
2323         if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
2324                 info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
2325
2326         if (hns3_dev_get_support(hw, INDEP_TXRX))
2327                 info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
2328                                  RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
2329         info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
2330
2331         if (hns3_dev_get_support(hw, PTP))
2332                 info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
2333
2334         info->rx_desc_lim = (struct rte_eth_desc_lim) {
2335                 .nb_max = HNS3_MAX_RING_DESC,
2336                 .nb_min = HNS3_MIN_RING_DESC,
2337                 .nb_align = HNS3_ALIGN_RING_DESC,
2338         };
2339
2340         info->tx_desc_lim = (struct rte_eth_desc_lim) {
2341                 .nb_max = HNS3_MAX_RING_DESC,
2342                 .nb_min = HNS3_MIN_RING_DESC,
2343                 .nb_align = HNS3_ALIGN_RING_DESC,
2344                 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
2345                 .nb_mtu_seg_max = hw->max_non_tso_bd_num,
2346         };
2347
2348         info->speed_capa = hns3_get_speed_capa(hw);
2349         info->default_rxconf = (struct rte_eth_rxconf) {
2350                 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
2351                 /*
2352                  * If there are no available Rx buffer descriptors, incoming
2353                  * packets are always dropped by hardware based on hns3 network
2354                  * engine.
2355                  */
2356                 .rx_drop_en = 1,
2357                 .offloads = 0,
2358         };
2359         info->default_txconf = (struct rte_eth_txconf) {
2360                 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
2361                 .offloads = 0,
2362         };
2363
2364         info->reta_size = hw->rss_ind_tbl_size;
2365         info->hash_key_size = HNS3_RSS_KEY_SIZE;
2366         info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
2367
2368         info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2369         info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2370         info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2371         info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2372         info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2373         info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2374
2375         return 0;
2376 }
2377
2378 static int
2379 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2380                     size_t fw_size)
2381 {
2382         struct hns3_adapter *hns = eth_dev->data->dev_private;
2383         struct hns3_hw *hw = &hns->hw;
2384         uint32_t version = hw->fw_version;
2385         int ret;
2386
2387         ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
2388                        hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
2389                                       HNS3_FW_VERSION_BYTE3_S),
2390                        hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
2391                                       HNS3_FW_VERSION_BYTE2_S),
2392                        hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
2393                                       HNS3_FW_VERSION_BYTE1_S),
2394                        hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
2395                                       HNS3_FW_VERSION_BYTE0_S));
2396         if (ret < 0)
2397                 return -EINVAL;
2398
2399         ret += 1; /* add the size of '\0' */
2400         if (fw_size < (size_t)ret)
2401                 return ret;
2402         else
2403                 return 0;
2404 }
2405
2406 static int
2407 hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
2408 {
2409         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2410         int ret;
2411
2412         (void)hns3_update_link_status(hw);
2413
2414         ret = hns3_update_link_info(eth_dev);
2415         if (ret)
2416                 hw->mac.link_status = RTE_ETH_LINK_DOWN;
2417
2418         return ret;
2419 }
2420
2421 static void
2422 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
2423                       struct rte_eth_link *new_link)
2424 {
2425         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2426         struct hns3_mac *mac = &hw->mac;
2427
2428         switch (mac->link_speed) {
2429         case RTE_ETH_SPEED_NUM_10M:
2430         case RTE_ETH_SPEED_NUM_100M:
2431         case RTE_ETH_SPEED_NUM_1G:
2432         case RTE_ETH_SPEED_NUM_10G:
2433         case RTE_ETH_SPEED_NUM_25G:
2434         case RTE_ETH_SPEED_NUM_40G:
2435         case RTE_ETH_SPEED_NUM_50G:
2436         case RTE_ETH_SPEED_NUM_100G:
2437         case RTE_ETH_SPEED_NUM_200G:
2438                 if (mac->link_status)
2439                         new_link->link_speed = mac->link_speed;
2440                 break;
2441         default:
2442                 if (mac->link_status)
2443                         new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
2444                 break;
2445         }
2446
2447         if (!mac->link_status)
2448                 new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
2449
2450         new_link->link_duplex = mac->link_duplex;
2451         new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
2452         new_link->link_autoneg = mac->link_autoneg;
2453 }
2454
2455 static int
2456 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
2457 {
2458 #define HNS3_LINK_CHECK_INTERVAL 100  /* 100ms */
2459 #define HNS3_MAX_LINK_CHECK_TIMES 20  /* 2s (100 * 20ms) in total */
2460
2461         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2462         uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
2463         struct hns3_mac *mac = &hw->mac;
2464         struct rte_eth_link new_link;
2465         int ret;
2466
2467         /* When port is stopped, report link down. */
2468         if (eth_dev->data->dev_started == 0) {
2469                 new_link.link_autoneg = mac->link_autoneg;
2470                 new_link.link_duplex = mac->link_duplex;
2471                 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2472                 new_link.link_status = RTE_ETH_LINK_DOWN;
2473                 goto out;
2474         }
2475
2476         do {
2477                 ret = hns3_update_port_link_info(eth_dev);
2478                 if (ret) {
2479                         hns3_err(hw, "failed to get port link info, ret = %d.",
2480                                  ret);
2481                         break;
2482                 }
2483
2484                 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
2485                         break;
2486
2487                 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
2488         } while (retry_cnt--);
2489
2490         memset(&new_link, 0, sizeof(new_link));
2491         hns3_setup_linkstatus(eth_dev, &new_link);
2492
2493 out:
2494         return rte_eth_linkstatus_set(eth_dev, &new_link);
2495 }
2496
2497 static int
2498 hns3_dev_set_link_up(struct rte_eth_dev *dev)
2499 {
2500         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2501         int ret;
2502
2503         /*
2504          * The "tx_pkt_burst" will be restored. But the secondary process does
2505          * not support the mechanism for notifying the primary process.
2506          */
2507         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2508                 hns3_err(hw, "secondary process does not support to set link up.");
2509                 return -ENOTSUP;
2510         }
2511
2512         /*
2513          * If device isn't started Rx/Tx function is still disabled, setting
2514          * link up is not allowed. But it is probably better to return success
2515          * to reduce the impact on the upper layer.
2516          */
2517         if (hw->adapter_state != HNS3_NIC_STARTED) {
2518                 hns3_info(hw, "device isn't started, can't set link up.");
2519                 return 0;
2520         }
2521
2522         if (!hw->set_link_down)
2523                 return 0;
2524
2525         rte_spinlock_lock(&hw->lock);
2526         ret = hns3_cfg_mac_mode(hw, true);
2527         if (ret) {
2528                 rte_spinlock_unlock(&hw->lock);
2529                 hns3_err(hw, "failed to set link up, ret = %d", ret);
2530                 return ret;
2531         }
2532
2533         hw->set_link_down = false;
2534         hns3_start_tx_datapath(dev);
2535         rte_spinlock_unlock(&hw->lock);
2536
2537         return 0;
2538 }
2539
2540 static int
2541 hns3_dev_set_link_down(struct rte_eth_dev *dev)
2542 {
2543         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2544         int ret;
2545
2546         /*
2547          * The "tx_pkt_burst" will be set to dummy function. But the secondary
2548          * process does not support the mechanism for notifying the primary
2549          * process.
2550          */
2551         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2552                 hns3_err(hw, "secondary process does not support to set link down.");
2553                 return -ENOTSUP;
2554         }
2555
2556         /*
2557          * If device isn't started or the API has been called, link status is
2558          * down, return success.
2559          */
2560         if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
2561                 return 0;
2562
2563         rte_spinlock_lock(&hw->lock);
2564         hns3_stop_tx_datapath(dev);
2565         ret = hns3_cfg_mac_mode(hw, false);
2566         if (ret) {
2567                 hns3_start_tx_datapath(dev);
2568                 rte_spinlock_unlock(&hw->lock);
2569                 hns3_err(hw, "failed to set link down, ret = %d", ret);
2570                 return ret;
2571         }
2572
2573         hw->set_link_down = true;
2574         rte_spinlock_unlock(&hw->lock);
2575
2576         return 0;
2577 }
2578
2579 static int
2580 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2581 {
2582         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2583         struct hns3_pf *pf = &hns->pf;
2584
2585         if (!(status->pf_state & HNS3_PF_STATE_DONE))
2586                 return -EINVAL;
2587
2588         pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2589
2590         return 0;
2591 }
2592
2593 static int
2594 hns3_query_function_status(struct hns3_hw *hw)
2595 {
2596 #define HNS3_QUERY_MAX_CNT              10
2597 #define HNS3_QUERY_SLEEP_MSCOEND        1
2598         struct hns3_func_status_cmd *req;
2599         struct hns3_cmd_desc desc;
2600         int timeout = 0;
2601         int ret;
2602
2603         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2604         req = (struct hns3_func_status_cmd *)desc.data;
2605
2606         do {
2607                 ret = hns3_cmd_send(hw, &desc, 1);
2608                 if (ret) {
2609                         PMD_INIT_LOG(ERR, "query function status failed %d",
2610                                      ret);
2611                         return ret;
2612                 }
2613
2614                 /* Check pf reset is done */
2615                 if (req->pf_state)
2616                         break;
2617
2618                 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);
2619         } while (timeout++ < HNS3_QUERY_MAX_CNT);
2620
2621         return hns3_parse_func_status(hw, req);
2622 }
2623
2624 static int
2625 hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
2626 {
2627         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2628         struct hns3_pf *pf = &hns->pf;
2629
2630         if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
2631                 /*
2632                  * The total_tqps_num obtained from firmware is maximum tqp
2633                  * numbers of this port, which should be used for PF and VFs.
2634                  * There is no need for pf to have so many tqp numbers in
2635                  * most cases. RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
2636                  * coming from config file, is assigned to maximum queue number
2637                  * for the PF of this port by user. So users can modify the
2638                  * maximum queue number of PF according to their own application
2639                  * scenarios, which is more flexible to use. In addition, many
2640                  * memories can be saved due to allocating queue statistics
2641                  * room according to the actual number of queues required. The
2642                  * maximum queue number of PF for network engine with
2643                  * revision_id greater than 0x30 is assigned by config file.
2644                  */
2645                 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
2646                         hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
2647                                  "must be greater than 0.",
2648                                  RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
2649                         return -EINVAL;
2650                 }
2651
2652                 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
2653                                        hw->total_tqps_num);
2654         } else {
2655                 /*
2656                  * Due to the limitation on the number of PF interrupts
2657                  * available, the maximum queue number assigned to PF on
2658                  * the network engine with revision_id 0x21 is 64.
2659                  */
2660                 hw->tqps_num = RTE_MIN(hw->total_tqps_num,
2661                                        HNS3_MAX_TQP_NUM_HIP08_PF);
2662         }
2663
2664         return 0;
2665 }
2666
2667 static int
2668 hns3_query_pf_resource(struct hns3_hw *hw)
2669 {
2670         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2671         struct hns3_pf *pf = &hns->pf;
2672         struct hns3_pf_res_cmd *req;
2673         struct hns3_cmd_desc desc;
2674         int ret;
2675
2676         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2677         ret = hns3_cmd_send(hw, &desc, 1);
2678         if (ret) {
2679                 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2680                 return ret;
2681         }
2682
2683         req = (struct hns3_pf_res_cmd *)desc.data;
2684         hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
2685                              rte_le_to_cpu_16(req->ext_tqp_num);
2686         ret = hns3_get_pf_max_tqp_num(hw);
2687         if (ret)
2688                 return ret;
2689
2690         pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
2691         pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
2692
2693         if (req->tx_buf_size)
2694                 pf->tx_buf_size =
2695                     rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2696         else
2697                 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2698
2699         pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2700
2701         if (req->dv_buf_size)
2702                 pf->dv_buf_size =
2703                     rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2704         else
2705                 pf->dv_buf_size = HNS3_DEFAULT_DV;
2706
2707         pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2708
2709         hw->num_msi =
2710                 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
2711                                HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
2712
2713         return 0;
2714 }
2715
2716 static void
2717 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
2718 {
2719         struct hns3_cfg_param_cmd *req;
2720         uint64_t mac_addr_tmp_high;
2721         uint8_t ext_rss_size_max;
2722         uint64_t mac_addr_tmp;
2723         uint32_t i;
2724
2725         req = (struct hns3_cfg_param_cmd *)desc[0].data;
2726
2727         /* get the configuration */
2728         cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2729                                      HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
2730         cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2731                                            HNS3_CFG_TQP_DESC_N_M,
2732                                            HNS3_CFG_TQP_DESC_N_S);
2733
2734         cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2735                                        HNS3_CFG_PHY_ADDR_M,
2736                                        HNS3_CFG_PHY_ADDR_S);
2737         cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2738                                          HNS3_CFG_MEDIA_TP_M,
2739                                          HNS3_CFG_MEDIA_TP_S);
2740         cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2741                                          HNS3_CFG_RX_BUF_LEN_M,
2742                                          HNS3_CFG_RX_BUF_LEN_S);
2743         /* get mac address */
2744         mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
2745         mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2746                                            HNS3_CFG_MAC_ADDR_H_M,
2747                                            HNS3_CFG_MAC_ADDR_H_S);
2748
2749         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
2750
2751         cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2752                                             HNS3_CFG_DEFAULT_SPEED_M,
2753                                             HNS3_CFG_DEFAULT_SPEED_S);
2754         cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2755                                            HNS3_CFG_RSS_SIZE_M,
2756                                            HNS3_CFG_RSS_SIZE_S);
2757
2758         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
2759                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
2760
2761         req = (struct hns3_cfg_param_cmd *)desc[1].data;
2762         cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
2763
2764         cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2765                                             HNS3_CFG_SPEED_ABILITY_M,
2766                                             HNS3_CFG_SPEED_ABILITY_S);
2767         cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2768                                         HNS3_CFG_UMV_TBL_SPACE_M,
2769                                         HNS3_CFG_UMV_TBL_SPACE_S);
2770         if (!cfg->umv_space)
2771                 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
2772
2773         ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
2774                                                HNS3_CFG_EXT_RSS_SIZE_M,
2775                                                HNS3_CFG_EXT_RSS_SIZE_S);
2776         /*
2777          * Field ext_rss_size_max obtained from firmware will be more flexible
2778          * for future changes and expansions, which is an exponent of 2, instead
2779          * of reading out directly. If this field is not zero, hns3 PF PMD
2780          * driver uses it as rss_size_max under one TC. Device, whose revision
2781          * id is greater than or equal to PCI_REVISION_ID_HIP09_A, obtains the
2782          * maximum number of queues supported under a TC through this field.
2783          */
2784         if (ext_rss_size_max)
2785                 cfg->rss_size_max = 1U << ext_rss_size_max;
2786 }
2787
2788 /* hns3_get_board_cfg: query the static parameter from NCL_config file in flash
2789  * @hw: pointer to struct hns3_hw
2790  * @hcfg: the config structure to be getted
2791  */
2792 static int
2793 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2794 {
2795         struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2796         struct hns3_cfg_param_cmd *req;
2797         uint32_t offset;
2798         uint32_t i;
2799         int ret;
2800
2801         for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2802                 offset = 0;
2803                 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2804                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2805                                           true);
2806                 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2807                                i * HNS3_CFG_RD_LEN_BYTES);
2808                 /* Len should be divided by 4 when send to hardware */
2809                 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2810                                HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2811                 req->offset = rte_cpu_to_le_32(offset);
2812         }
2813
2814         ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2815         if (ret) {
2816                 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2817                 return ret;
2818         }
2819
2820         hns3_parse_cfg(hcfg, desc);
2821
2822         return 0;
2823 }
2824
2825 static int
2826 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2827 {
2828         switch (speed_cmd) {
2829         case HNS3_CFG_SPEED_10M:
2830                 *speed = RTE_ETH_SPEED_NUM_10M;
2831                 break;
2832         case HNS3_CFG_SPEED_100M:
2833                 *speed = RTE_ETH_SPEED_NUM_100M;
2834                 break;
2835         case HNS3_CFG_SPEED_1G:
2836                 *speed = RTE_ETH_SPEED_NUM_1G;
2837                 break;
2838         case HNS3_CFG_SPEED_10G:
2839                 *speed = RTE_ETH_SPEED_NUM_10G;
2840                 break;
2841         case HNS3_CFG_SPEED_25G:
2842                 *speed = RTE_ETH_SPEED_NUM_25G;
2843                 break;
2844         case HNS3_CFG_SPEED_40G:
2845                 *speed = RTE_ETH_SPEED_NUM_40G;
2846                 break;
2847         case HNS3_CFG_SPEED_50G:
2848                 *speed = RTE_ETH_SPEED_NUM_50G;
2849                 break;
2850         case HNS3_CFG_SPEED_100G:
2851                 *speed = RTE_ETH_SPEED_NUM_100G;
2852                 break;
2853         case HNS3_CFG_SPEED_200G:
2854                 *speed = RTE_ETH_SPEED_NUM_200G;
2855                 break;
2856         default:
2857                 return -EINVAL;
2858         }
2859
2860         return 0;
2861 }
2862
2863 static void
2864 hns3_set_default_dev_specifications(struct hns3_hw *hw)
2865 {
2866         hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
2867         hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
2868         hw->rss_key_size = HNS3_RSS_KEY_SIZE;
2869         hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
2870         hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
2871 }
2872
2873 static void
2874 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
2875 {
2876         struct hns3_dev_specs_0_cmd *req0;
2877
2878         req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
2879
2880         hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
2881         hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
2882         hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
2883         hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
2884         hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
2885 }
2886
2887 static int
2888 hns3_check_dev_specifications(struct hns3_hw *hw)
2889 {
2890         if (hw->rss_ind_tbl_size == 0 ||
2891             hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
2892                 hns3_err(hw, "the size of hash lookup table configured (%u)"
2893                               " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
2894                               HNS3_RSS_IND_TBL_SIZE_MAX);
2895                 return -EINVAL;
2896         }
2897
2898         return 0;
2899 }
2900
2901 static int
2902 hns3_query_dev_specifications(struct hns3_hw *hw)
2903 {
2904         struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
2905         int ret;
2906         int i;
2907
2908         for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2909                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
2910                                           true);
2911                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2912         }
2913         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
2914
2915         ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
2916         if (ret)
2917                 return ret;
2918
2919         hns3_parse_dev_specifications(hw, desc);
2920
2921         return hns3_check_dev_specifications(hw);
2922 }
2923
2924 static int
2925 hns3_get_capability(struct hns3_hw *hw)
2926 {
2927         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2928         struct rte_pci_device *pci_dev;
2929         struct hns3_pf *pf = &hns->pf;
2930         struct rte_eth_dev *eth_dev;
2931         uint16_t device_id;
2932         uint8_t revision;
2933         int ret;
2934
2935         eth_dev = &rte_eth_devices[hw->data->port_id];
2936         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2937         device_id = pci_dev->id.device_id;
2938
2939         if (device_id == HNS3_DEV_ID_25GE_RDMA ||
2940             device_id == HNS3_DEV_ID_50GE_RDMA ||
2941             device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
2942             device_id == HNS3_DEV_ID_200G_RDMA)
2943                 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
2944
2945         /* Get PCI revision id */
2946         ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
2947                                   HNS3_PCI_REVISION_ID);
2948         if (ret != HNS3_PCI_REVISION_ID_LEN) {
2949                 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
2950                              ret);
2951                 return -EIO;
2952         }
2953         hw->revision = revision;
2954
2955         if (revision < PCI_REVISION_ID_HIP09_A) {
2956                 hns3_set_default_dev_specifications(hw);
2957                 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
2958                 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
2959                 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
2960                 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
2961                 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
2962                 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
2963                 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
2964                 hw->rss_info.ipv6_sctp_offload_supported = false;
2965                 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
2966                 pf->support_multi_tc_pause = false;
2967                 return 0;
2968         }
2969
2970         ret = hns3_query_dev_specifications(hw);
2971         if (ret) {
2972                 PMD_INIT_LOG(ERR,
2973                              "failed to query dev specifications, ret = %d",
2974                              ret);
2975                 return ret;
2976         }
2977
2978         hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
2979         hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
2980         hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
2981         hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
2982         hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
2983         hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
2984         pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
2985         hw->rss_info.ipv6_sctp_offload_supported = true;
2986         hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
2987         pf->support_multi_tc_pause = true;
2988
2989         return 0;
2990 }
2991
2992 static int
2993 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
2994 {
2995         int ret;
2996
2997         switch (media_type) {
2998         case HNS3_MEDIA_TYPE_COPPER:
2999                 if (!hns3_dev_get_support(hw, COPPER)) {
3000                         PMD_INIT_LOG(ERR,
3001                                      "Media type is copper, not supported.");
3002                         ret = -EOPNOTSUPP;
3003                 } else {
3004                         ret = 0;
3005                 }
3006                 break;
3007         case HNS3_MEDIA_TYPE_FIBER:
3008                 ret = 0;
3009                 break;
3010         case HNS3_MEDIA_TYPE_BACKPLANE:
3011                 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
3012                 ret = -EOPNOTSUPP;
3013                 break;
3014         default:
3015                 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
3016                 ret = -EINVAL;
3017                 break;
3018         }
3019
3020         return ret;
3021 }
3022
3023 static int
3024 hns3_get_board_configuration(struct hns3_hw *hw)
3025 {
3026         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3027         struct hns3_pf *pf = &hns->pf;
3028         struct hns3_cfg cfg;
3029         int ret;
3030
3031         ret = hns3_get_board_cfg(hw, &cfg);
3032         if (ret) {
3033                 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
3034                 return ret;
3035         }
3036
3037         ret = hns3_check_media_type(hw, cfg.media_type);
3038         if (ret)
3039                 return ret;
3040
3041         hw->mac.media_type = cfg.media_type;
3042         hw->rss_size_max = cfg.rss_size_max;
3043         hw->rss_dis_flag = false;
3044         memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
3045         hw->mac.phy_addr = cfg.phy_addr;
3046         hw->num_tx_desc = cfg.tqp_desc_num;
3047         hw->num_rx_desc = cfg.tqp_desc_num;
3048         hw->dcb_info.num_pg = 1;
3049         hw->dcb_info.hw_pfc_map = 0;
3050
3051         ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
3052         if (ret) {
3053                 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
3054                              cfg.default_speed, ret);
3055                 return ret;
3056         }
3057
3058         pf->tc_max = cfg.tc_num;
3059         if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
3060                 PMD_INIT_LOG(WARNING,
3061                              "Get TC num(%u) from flash, set TC num to 1",
3062                              pf->tc_max);
3063                 pf->tc_max = 1;
3064         }
3065
3066         /* Dev does not support DCB */
3067         if (!hns3_dev_get_support(hw, DCB)) {
3068                 pf->tc_max = 1;
3069                 pf->pfc_max = 0;
3070         } else
3071                 pf->pfc_max = pf->tc_max;
3072
3073         hw->dcb_info.num_tc = 1;
3074         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
3075                                      hw->tqps_num / hw->dcb_info.num_tc);
3076         hns3_set_bit(hw->hw_tc_map, 0, 1);
3077         pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
3078
3079         pf->wanted_umv_size = cfg.umv_space;
3080
3081         return ret;
3082 }
3083
3084 static int
3085 hns3_get_configuration(struct hns3_hw *hw)
3086 {
3087         int ret;
3088
3089         ret = hns3_query_function_status(hw);
3090         if (ret) {
3091                 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
3092                 return ret;
3093         }
3094
3095         /* Get device capability */
3096         ret = hns3_get_capability(hw);
3097         if (ret) {
3098                 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
3099                 return ret;
3100         }
3101
3102         /* Get pf resource */
3103         ret = hns3_query_pf_resource(hw);
3104         if (ret) {
3105                 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
3106                 return ret;
3107         }
3108
3109         ret = hns3_get_board_configuration(hw);
3110         if (ret) {
3111                 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
3112                 return ret;
3113         }
3114
3115         ret = hns3_query_dev_fec_info(hw);
3116         if (ret)
3117                 PMD_INIT_LOG(ERR,
3118                              "failed to query FEC information, ret = %d", ret);
3119
3120         return ret;
3121 }
3122
3123 static int
3124 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
3125                       uint16_t tqp_vid, bool is_pf)
3126 {
3127         struct hns3_tqp_map_cmd *req;
3128         struct hns3_cmd_desc desc;
3129         int ret;
3130
3131         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
3132
3133         req = (struct hns3_tqp_map_cmd *)desc.data;
3134         req->tqp_id = rte_cpu_to_le_16(tqp_pid);
3135         req->tqp_vf = func_id;
3136         req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
3137         if (!is_pf)
3138                 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
3139         req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
3140
3141         ret = hns3_cmd_send(hw, &desc, 1);
3142         if (ret)
3143                 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
3144
3145         return ret;
3146 }
3147
3148 static int
3149 hns3_map_tqp(struct hns3_hw *hw)
3150 {
3151         int ret;
3152         int i;
3153
3154         /*
3155          * In current version, VF is not supported when PF is driven by DPDK
3156          * driver, so we assign total tqps_num tqps allocated to this port
3157          * to PF.
3158          */
3159         for (i = 0; i < hw->total_tqps_num; i++) {
3160                 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
3161                 if (ret)
3162                         return ret;
3163         }
3164
3165         return 0;
3166 }
3167
3168 static int
3169 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
3170 {
3171         struct hns3_config_mac_speed_dup_cmd *req;
3172         struct hns3_cmd_desc desc;
3173         int ret;
3174
3175         req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
3176
3177         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
3178
3179         hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
3180
3181         switch (speed) {
3182         case RTE_ETH_SPEED_NUM_10M:
3183                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3184                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
3185                 break;
3186         case RTE_ETH_SPEED_NUM_100M:
3187                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3188                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
3189                 break;
3190         case RTE_ETH_SPEED_NUM_1G:
3191                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3192                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
3193                 break;
3194         case RTE_ETH_SPEED_NUM_10G:
3195                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3196                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
3197                 break;
3198         case RTE_ETH_SPEED_NUM_25G:
3199                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3200                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
3201                 break;
3202         case RTE_ETH_SPEED_NUM_40G:
3203                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3204                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
3205                 break;
3206         case RTE_ETH_SPEED_NUM_50G:
3207                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3208                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
3209                 break;
3210         case RTE_ETH_SPEED_NUM_100G:
3211                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3212                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
3213                 break;
3214         case RTE_ETH_SPEED_NUM_200G:
3215                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3216                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
3217                 break;
3218         default:
3219                 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
3220                 return -EINVAL;
3221         }
3222
3223         hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
3224
3225         ret = hns3_cmd_send(hw, &desc, 1);
3226         if (ret)
3227                 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
3228
3229         return ret;
3230 }
3231
3232 static int
3233 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3234 {
3235         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3236         struct hns3_pf *pf = &hns->pf;
3237         struct hns3_priv_buf *priv;
3238         uint32_t i, total_size;
3239
3240         total_size = pf->pkt_buf_size;
3241
3242         /* alloc tx buffer for all enabled tc */
3243         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3244                 priv = &buf_alloc->priv_buf[i];
3245
3246                 if (hw->hw_tc_map & BIT(i)) {
3247                         if (total_size < pf->tx_buf_size)
3248                                 return -ENOMEM;
3249
3250                         priv->tx_buf_size = pf->tx_buf_size;
3251                 } else
3252                         priv->tx_buf_size = 0;
3253
3254                 total_size -= priv->tx_buf_size;
3255         }
3256
3257         return 0;
3258 }
3259
3260 static int
3261 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3262 {
3263 /* TX buffer size is unit by 128 byte */
3264 #define HNS3_BUF_SIZE_UNIT_SHIFT        7
3265 #define HNS3_BUF_SIZE_UPDATE_EN_MSK     BIT(15)
3266         struct hns3_tx_buff_alloc_cmd *req;
3267         struct hns3_cmd_desc desc;
3268         uint32_t buf_size;
3269         uint32_t i;
3270         int ret;
3271
3272         req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
3273
3274         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
3275         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3276                 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
3277
3278                 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
3279                 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
3280                                                 HNS3_BUF_SIZE_UPDATE_EN_MSK);
3281         }
3282
3283         ret = hns3_cmd_send(hw, &desc, 1);
3284         if (ret)
3285                 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
3286
3287         return ret;
3288 }
3289
3290 static int
3291 hns3_get_tc_num(struct hns3_hw *hw)
3292 {
3293         int cnt = 0;
3294         uint8_t i;
3295
3296         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3297                 if (hw->hw_tc_map & BIT(i))
3298                         cnt++;
3299         return cnt;
3300 }
3301
3302 static uint32_t
3303 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3304 {
3305         struct hns3_priv_buf *priv;
3306         uint32_t rx_priv = 0;
3307         int i;
3308
3309         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3310                 priv = &buf_alloc->priv_buf[i];
3311                 if (priv->enable)
3312                         rx_priv += priv->buf_size;
3313         }
3314         return rx_priv;
3315 }
3316
3317 static uint32_t
3318 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3319 {
3320         uint32_t total_tx_size = 0;
3321         uint32_t i;
3322
3323         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3324                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
3325
3326         return total_tx_size;
3327 }
3328
3329 /* Get the number of pfc enabled TCs, which have private buffer */
3330 static int
3331 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3332 {
3333         struct hns3_priv_buf *priv;
3334         int cnt = 0;
3335         uint8_t i;
3336
3337         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3338                 priv = &buf_alloc->priv_buf[i];
3339                 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3340                         cnt++;
3341         }
3342
3343         return cnt;
3344 }
3345
3346 /* Get the number of pfc disabled TCs, which have private buffer */
3347 static int
3348 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
3349                          struct hns3_pkt_buf_alloc *buf_alloc)
3350 {
3351         struct hns3_priv_buf *priv;
3352         int cnt = 0;
3353         uint8_t i;
3354
3355         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3356                 priv = &buf_alloc->priv_buf[i];
3357                 if (hw->hw_tc_map & BIT(i) &&
3358                     !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3359                         cnt++;
3360         }
3361
3362         return cnt;
3363 }
3364
3365 static bool
3366 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
3367                   uint32_t rx_all)
3368 {
3369         uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
3370         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3371         struct hns3_pf *pf = &hns->pf;
3372         uint32_t shared_buf, aligned_mps;
3373         uint32_t rx_priv;
3374         uint8_t tc_num;
3375         uint8_t i;
3376
3377         tc_num = hns3_get_tc_num(hw);
3378         aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3379
3380         if (hns3_dev_get_support(hw, DCB))
3381                 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
3382                                         pf->dv_buf_size;
3383         else
3384                 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
3385                                         + pf->dv_buf_size;
3386
3387         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
3388         shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
3389                              HNS3_BUF_SIZE_UNIT);
3390
3391         rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
3392         if (rx_all < rx_priv + shared_std)
3393                 return false;
3394
3395         shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
3396         buf_alloc->s_buf.buf_size = shared_buf;
3397         if (hns3_dev_get_support(hw, DCB)) {
3398                 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
3399                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
3400                         - roundup(aligned_mps / HNS3_BUF_DIV_BY,
3401                                   HNS3_BUF_SIZE_UNIT);
3402         } else {
3403                 buf_alloc->s_buf.self.high =
3404                         aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3405                 buf_alloc->s_buf.self.low = aligned_mps;
3406         }
3407
3408         if (hns3_dev_get_support(hw, DCB)) {
3409                 hi_thrd = shared_buf - pf->dv_buf_size;
3410
3411                 if (tc_num <= NEED_RESERVE_TC_NUM)
3412                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
3413                                   BUF_MAX_PERCENT;
3414
3415                 if (tc_num)
3416                         hi_thrd = hi_thrd / tc_num;
3417
3418                 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
3419                 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
3420                 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
3421         } else {
3422                 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3423                 lo_thrd = aligned_mps;
3424         }
3425
3426         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3427                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
3428                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
3429         }
3430
3431         return true;
3432 }
3433
3434 static bool
3435 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
3436                      struct hns3_pkt_buf_alloc *buf_alloc)
3437 {
3438         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3439         struct hns3_pf *pf = &hns->pf;
3440         struct hns3_priv_buf *priv;
3441         uint32_t aligned_mps;
3442         uint32_t rx_all;
3443         uint8_t i;
3444
3445         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3446         aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3447
3448         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3449                 priv = &buf_alloc->priv_buf[i];
3450
3451                 priv->enable = 0;
3452                 priv->wl.low = 0;
3453                 priv->wl.high = 0;
3454                 priv->buf_size = 0;
3455
3456                 if (!(hw->hw_tc_map & BIT(i)))
3457                         continue;
3458
3459                 priv->enable = 1;
3460                 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
3461                         priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
3462                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
3463                                                 HNS3_BUF_SIZE_UNIT);
3464                 } else {
3465                         priv->wl.low = 0;
3466                         priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
3467                                         aligned_mps;
3468                 }
3469
3470                 priv->buf_size = priv->wl.high + pf->dv_buf_size;
3471         }
3472
3473         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3474 }
3475
3476 static bool
3477 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
3478                              struct hns3_pkt_buf_alloc *buf_alloc)
3479 {
3480         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3481         struct hns3_pf *pf = &hns->pf;
3482         struct hns3_priv_buf *priv;
3483         int no_pfc_priv_num;
3484         uint32_t rx_all;
3485         uint8_t mask;
3486         int i;
3487
3488         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3489         no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
3490
3491         /* let the last to be cleared first */
3492         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3493                 priv = &buf_alloc->priv_buf[i];
3494                 mask = BIT((uint8_t)i);
3495                 if (hw->hw_tc_map & mask &&
3496                     !(hw->dcb_info.hw_pfc_map & mask)) {
3497                         /* Clear the no pfc TC private buffer */
3498                         priv->wl.low = 0;
3499                         priv->wl.high = 0;
3500                         priv->buf_size = 0;
3501                         priv->enable = 0;
3502                         no_pfc_priv_num--;
3503                 }
3504
3505                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3506                     no_pfc_priv_num == 0)
3507                         break;
3508         }
3509
3510         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3511 }
3512
3513 static bool
3514 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
3515                            struct hns3_pkt_buf_alloc *buf_alloc)
3516 {
3517         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3518         struct hns3_pf *pf = &hns->pf;
3519         struct hns3_priv_buf *priv;
3520         uint32_t rx_all;
3521         int pfc_priv_num;
3522         uint8_t mask;
3523         int i;
3524
3525         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3526         pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
3527
3528         /* let the last to be cleared first */
3529         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3530                 priv = &buf_alloc->priv_buf[i];
3531                 mask = BIT((uint8_t)i);
3532                 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
3533                         /* Reduce the number of pfc TC with private buffer */
3534                         priv->wl.low = 0;
3535                         priv->enable = 0;
3536                         priv->wl.high = 0;
3537                         priv->buf_size = 0;
3538                         pfc_priv_num--;
3539                 }
3540                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3541                     pfc_priv_num == 0)
3542                         break;
3543         }
3544
3545         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3546 }
3547
3548 static bool
3549 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
3550                           struct hns3_pkt_buf_alloc *buf_alloc)
3551 {
3552 #define COMPENSATE_BUFFER       0x3C00
3553 #define COMPENSATE_HALF_MPS_NUM 5
3554 #define PRIV_WL_GAP             0x1800
3555         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3556         struct hns3_pf *pf = &hns->pf;
3557         uint32_t tc_num = hns3_get_tc_num(hw);
3558         uint32_t half_mps = pf->mps >> 1;
3559         struct hns3_priv_buf *priv;
3560         uint32_t min_rx_priv;
3561         uint32_t rx_priv;
3562         uint8_t i;
3563
3564         rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3565         if (tc_num)
3566                 rx_priv = rx_priv / tc_num;
3567
3568         if (tc_num <= NEED_RESERVE_TC_NUM)
3569                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
3570
3571         /*
3572          * Minimum value of private buffer in rx direction (min_rx_priv) is
3573          * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
3574          * buffer if rx_priv is greater than min_rx_priv.
3575          */
3576         min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
3577                         COMPENSATE_HALF_MPS_NUM * half_mps;
3578         min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
3579         rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
3580         if (rx_priv < min_rx_priv)
3581                 return false;
3582
3583         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3584                 priv = &buf_alloc->priv_buf[i];
3585                 priv->enable = 0;
3586                 priv->wl.low = 0;
3587                 priv->wl.high = 0;
3588                 priv->buf_size = 0;
3589
3590                 if (!(hw->hw_tc_map & BIT(i)))
3591                         continue;
3592
3593                 priv->enable = 1;
3594                 priv->buf_size = rx_priv;
3595                 priv->wl.high = rx_priv - pf->dv_buf_size;
3596                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
3597         }
3598
3599         buf_alloc->s_buf.buf_size = 0;
3600
3601         return true;
3602 }
3603
3604 /*
3605  * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
3606  * @hw: pointer to struct hns3_hw
3607  * @buf_alloc: pointer to buffer calculation data
3608  * @return: 0: calculate sucessful, negative: fail
3609  */
3610 static int
3611 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3612 {
3613         /* When DCB is not supported, rx private buffer is not allocated. */
3614         if (!hns3_dev_get_support(hw, DCB)) {
3615                 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3616                 struct hns3_pf *pf = &hns->pf;
3617                 uint32_t rx_all = pf->pkt_buf_size;
3618
3619                 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
3620                 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
3621                         return -ENOMEM;
3622
3623                 return 0;
3624         }
3625
3626         /*
3627          * Try to allocate privated packet buffer for all TCs without share
3628          * buffer.
3629          */
3630         if (hns3_only_alloc_priv_buff(hw, buf_alloc))
3631                 return 0;
3632
3633         /*
3634          * Try to allocate privated packet buffer for all TCs with share
3635          * buffer.
3636          */
3637         if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
3638                 return 0;
3639
3640         /*
3641          * For different application scenes, the enabled port number, TC number
3642          * and no_drop TC number are different. In order to obtain the better
3643          * performance, software could allocate the buffer size and configure
3644          * the waterline by trying to decrease the private buffer size according
3645          * to the order, namely, waterline of valid tc, pfc disabled tc, pfc
3646          * enabled tc.
3647          */
3648         if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
3649                 return 0;
3650
3651         if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
3652                 return 0;
3653
3654         if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
3655                 return 0;
3656
3657         return -ENOMEM;
3658 }
3659
3660 static int
3661 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3662 {
3663         struct hns3_rx_priv_buff_cmd *req;
3664         struct hns3_cmd_desc desc;
3665         uint32_t buf_size;
3666         int ret;
3667         int i;
3668
3669         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
3670         req = (struct hns3_rx_priv_buff_cmd *)desc.data;
3671
3672         /* Alloc private buffer TCs */
3673         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3674                 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
3675
3676                 req->buf_num[i] =
3677                         rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
3678                 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
3679         }
3680
3681         buf_size = buf_alloc->s_buf.buf_size;
3682         req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
3683                                            (1 << HNS3_TC0_PRI_BUF_EN_B));
3684
3685         ret = hns3_cmd_send(hw, &desc, 1);
3686         if (ret)
3687                 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
3688
3689         return ret;
3690 }
3691
3692 static int
3693 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3694 {
3695 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
3696         struct hns3_rx_priv_wl_buf *req;
3697         struct hns3_priv_buf *priv;
3698         struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
3699         int i, j;
3700         int ret;
3701
3702         for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
3703                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
3704                                           false);
3705                 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
3706
3707                 /* The first descriptor set the NEXT bit to 1 */
3708                 if (i == 0)
3709                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3710                 else
3711                         desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3712
3713                 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3714                         uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
3715
3716                         priv = &buf_alloc->priv_buf[idx];
3717                         req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
3718                                                         HNS3_BUF_UNIT_S);
3719                         req->tc_wl[j].high |=
3720                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3721                         req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
3722                                                         HNS3_BUF_UNIT_S);
3723                         req->tc_wl[j].low |=
3724                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3725                 }
3726         }
3727
3728         /* Send 2 descriptor at one time */
3729         ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
3730         if (ret)
3731                 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
3732                              ret);
3733         return ret;
3734 }
3735
3736 static int
3737 hns3_common_thrd_config(struct hns3_hw *hw,
3738                         struct hns3_pkt_buf_alloc *buf_alloc)
3739 {
3740 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3741         struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3742         struct hns3_rx_com_thrd *req;
3743         struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3744         struct hns3_tc_thrd *tc;
3745         int tc_idx;
3746         int i, j;
3747         int ret;
3748
3749         for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3750                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3751                                           false);
3752                 req = (struct hns3_rx_com_thrd *)&desc[i].data;
3753
3754                 /* The first descriptor set the NEXT bit to 1 */
3755                 if (i == 0)
3756                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3757                 else
3758                         desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3759
3760                 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3761                         tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3762                         tc = &s_buf->tc_thrd[tc_idx];
3763
3764                         req->com_thrd[j].high =
3765                                 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3766                         req->com_thrd[j].high |=
3767                                  rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3768                         req->com_thrd[j].low =
3769                                 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3770                         req->com_thrd[j].low |=
3771                                  rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3772                 }
3773         }
3774
3775         /* Send 2 descriptors at one time */
3776         ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3777         if (ret)
3778                 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3779
3780         return ret;
3781 }
3782
3783 static int
3784 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3785 {
3786         struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3787         struct hns3_rx_com_wl *req;
3788         struct hns3_cmd_desc desc;
3789         int ret;
3790
3791         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3792
3793         req = (struct hns3_rx_com_wl *)desc.data;
3794         req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3795         req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3796
3797         req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3798         req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3799
3800         ret = hns3_cmd_send(hw, &desc, 1);
3801         if (ret)
3802                 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3803
3804         return ret;
3805 }
3806
3807 int
3808 hns3_buffer_alloc(struct hns3_hw *hw)
3809 {
3810         struct hns3_pkt_buf_alloc pkt_buf;
3811         int ret;
3812
3813         memset(&pkt_buf, 0, sizeof(pkt_buf));
3814         ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3815         if (ret) {
3816                 PMD_INIT_LOG(ERR,
3817                              "could not calc tx buffer size for all TCs %d",
3818                              ret);
3819                 return ret;
3820         }
3821
3822         ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3823         if (ret) {
3824                 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3825                 return ret;
3826         }
3827
3828         ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3829         if (ret) {
3830                 PMD_INIT_LOG(ERR,
3831                              "could not calc rx priv buffer size for all TCs %d",
3832                              ret);
3833                 return ret;
3834         }
3835
3836         ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3837         if (ret) {
3838                 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3839                 return ret;
3840         }
3841
3842         if (hns3_dev_get_support(hw, DCB)) {
3843                 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3844                 if (ret) {
3845                         PMD_INIT_LOG(ERR,
3846                                      "could not configure rx private waterline %d",
3847                                      ret);
3848                         return ret;
3849                 }
3850
3851                 ret = hns3_common_thrd_config(hw, &pkt_buf);
3852                 if (ret) {
3853                         PMD_INIT_LOG(ERR,
3854                                      "could not configure common threshold %d",
3855                                      ret);
3856                         return ret;
3857                 }
3858         }
3859
3860         ret = hns3_common_wl_config(hw, &pkt_buf);
3861         if (ret)
3862                 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3863                              ret);
3864
3865         return ret;
3866 }
3867
3868 static int
3869 hns3_mac_init(struct hns3_hw *hw)
3870 {
3871         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3872         struct hns3_mac *mac = &hw->mac;
3873         struct hns3_pf *pf = &hns->pf;
3874         int ret;
3875
3876         pf->support_sfp_query = true;
3877         mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3878         ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3879         if (ret) {
3880                 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3881                 return ret;
3882         }
3883
3884         mac->link_status = RTE_ETH_LINK_DOWN;
3885
3886         return hns3_config_mtu(hw, pf->mps);
3887 }
3888
3889 static int
3890 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3891 {
3892 #define HNS3_ETHERTYPE_SUCCESS_ADD              0
3893 #define HNS3_ETHERTYPE_ALREADY_ADD              1
3894 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW         2
3895 #define HNS3_ETHERTYPE_KEY_CONFLICT             3
3896         int return_status;
3897
3898         if (cmdq_resp) {
3899                 PMD_INIT_LOG(ERR,
3900                              "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
3901                              cmdq_resp);
3902                 return -EIO;
3903         }
3904
3905         switch (resp_code) {
3906         case HNS3_ETHERTYPE_SUCCESS_ADD:
3907         case HNS3_ETHERTYPE_ALREADY_ADD:
3908                 return_status = 0;
3909                 break;
3910         case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3911                 PMD_INIT_LOG(ERR,
3912                              "add mac ethertype failed for manager table overflow.");
3913                 return_status = -EIO;
3914                 break;
3915         case HNS3_ETHERTYPE_KEY_CONFLICT:
3916                 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3917                 return_status = -EIO;
3918                 break;
3919         default:
3920                 PMD_INIT_LOG(ERR,
3921                              "add mac ethertype failed for undefined, code=%u.",
3922                              resp_code);
3923                 return_status = -EIO;
3924                 break;
3925         }
3926
3927         return return_status;
3928 }
3929
3930 static int
3931 hns3_add_mgr_tbl(struct hns3_hw *hw,
3932                  const struct hns3_mac_mgr_tbl_entry_cmd *req)
3933 {
3934         struct hns3_cmd_desc desc;
3935         uint8_t resp_code;
3936         uint16_t retval;
3937         int ret;
3938
3939         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3940         memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3941
3942         ret = hns3_cmd_send(hw, &desc, 1);
3943         if (ret) {
3944                 PMD_INIT_LOG(ERR,
3945                              "add mac ethertype failed for cmd_send, ret =%d.",
3946                              ret);
3947                 return ret;
3948         }
3949
3950         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3951         retval = rte_le_to_cpu_16(desc.retval);
3952
3953         return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3954 }
3955
3956 static void
3957 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3958                      int *table_item_num)
3959 {
3960         struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3961
3962         /*
3963          * In current version, we add one item in management table as below:
3964          * 0x0180C200000E -- LLDP MC address
3965          */
3966         tbl = mgr_table;
3967         tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3968         tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3969         tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3970         tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
3971         tbl->i_port_bitmap = 0x1;
3972         *table_item_num = 1;
3973 }
3974
3975 static int
3976 hns3_init_mgr_tbl(struct hns3_hw *hw)
3977 {
3978 #define HNS_MAC_MGR_TBL_MAX_SIZE        16
3979         struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3980         int table_item_num;
3981         int ret;
3982         int i;
3983
3984         memset(mgr_table, 0, sizeof(mgr_table));
3985         hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3986         for (i = 0; i < table_item_num; i++) {
3987                 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3988                 if (ret) {
3989                         PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3990                                      ret);
3991                         return ret;
3992                 }
3993         }
3994
3995         return 0;
3996 }
3997
3998 static void
3999 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
4000                         bool en_mc, bool en_bc, int vport_id)
4001 {
4002         if (!param)
4003                 return;
4004
4005         memset(param, 0, sizeof(struct hns3_promisc_param));
4006         if (en_uc)
4007                 param->enable = HNS3_PROMISC_EN_UC;
4008         if (en_mc)
4009                 param->enable |= HNS3_PROMISC_EN_MC;
4010         if (en_bc)
4011                 param->enable |= HNS3_PROMISC_EN_BC;
4012         param->vf_id = vport_id;
4013 }
4014
4015 static int
4016 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
4017 {
4018         struct hns3_promisc_cfg_cmd *req;
4019         struct hns3_cmd_desc desc;
4020         int ret;
4021
4022         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
4023
4024         req = (struct hns3_promisc_cfg_cmd *)desc.data;
4025         req->vf_id = param->vf_id;
4026         req->flag = (param->enable << HNS3_PROMISC_EN_B) |
4027             HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
4028
4029         ret = hns3_cmd_send(hw, &desc, 1);
4030         if (ret)
4031                 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
4032
4033         return ret;
4034 }
4035
4036 static int
4037 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
4038 {
4039         struct hns3_promisc_param param;
4040         bool en_bc_pmc = true;
4041         uint8_t vf_id;
4042
4043         /*
4044          * In current version VF is not supported when PF is driven by DPDK
4045          * driver, just need to configure parameters for PF vport.
4046          */
4047         vf_id = HNS3_PF_FUNC_ID;
4048
4049         hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
4050         return hns3_cmd_set_promisc_mode(hw, &param);
4051 }
4052
4053 static int
4054 hns3_promisc_init(struct hns3_hw *hw)
4055 {
4056         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
4057         struct hns3_pf *pf = &hns->pf;
4058         struct hns3_promisc_param param;
4059         uint16_t func_id;
4060         int ret;
4061
4062         ret = hns3_set_promisc_mode(hw, false, false);
4063         if (ret) {
4064                 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
4065                 return ret;
4066         }
4067
4068         /*
4069          * In current version VFs are not supported when PF is driven by DPDK
4070          * driver. After PF has been taken over by DPDK, the original VF will
4071          * be invalid. So, there is a possibility of entry residues. It should
4072          * clear VFs's promisc mode to avoid unnecessary bandwidth usage
4073          * during init.
4074          */
4075         for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
4076                 hns3_promisc_param_init(&param, false, false, false, func_id);
4077                 ret = hns3_cmd_set_promisc_mode(hw, &param);
4078                 if (ret) {
4079                         PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
4080                                         " ret = %d", func_id, ret);
4081                         return ret;
4082                 }
4083         }
4084
4085         return 0;
4086 }
4087
4088 static void
4089 hns3_promisc_uninit(struct hns3_hw *hw)
4090 {
4091         struct hns3_promisc_param param;
4092         uint16_t func_id;
4093         int ret;
4094
4095         func_id = HNS3_PF_FUNC_ID;
4096
4097         /*
4098          * In current version VFs are not supported when PF is driven by
4099          * DPDK driver, and VFs' promisc mode status has been cleared during
4100          * init and their status will not change. So just clear PF's promisc
4101          * mode status during uninit.
4102          */
4103         hns3_promisc_param_init(&param, false, false, false, func_id);
4104         ret = hns3_cmd_set_promisc_mode(hw, &param);
4105         if (ret)
4106                 PMD_INIT_LOG(ERR, "failed to clear promisc status during"
4107                                 " uninit, ret = %d", ret);
4108 }
4109
4110 static int
4111 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
4112 {
4113         bool allmulti = dev->data->all_multicast ? true : false;
4114         struct hns3_adapter *hns = dev->data->dev_private;
4115         struct hns3_hw *hw = &hns->hw;
4116         uint64_t offloads;
4117         int err;
4118         int ret;
4119
4120         rte_spinlock_lock(&hw->lock);
4121         ret = hns3_set_promisc_mode(hw, true, true);
4122         if (ret) {
4123                 rte_spinlock_unlock(&hw->lock);
4124                 hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
4125                          ret);
4126                 return ret;
4127         }
4128
4129         /*
4130          * When promiscuous mode was enabled, disable the vlan filter to let
4131          * all packets coming in in the receiving direction.
4132          */
4133         offloads = dev->data->dev_conf.rxmode.offloads;
4134         if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
4135                 ret = hns3_enable_vlan_filter(hns, false);
4136                 if (ret) {
4137                         hns3_err(hw, "failed to enable promiscuous mode due to "
4138                                      "failure to disable vlan filter, ret = %d",
4139                                  ret);
4140                         err = hns3_set_promisc_mode(hw, false, allmulti);
4141                         if (err)
4142                                 hns3_err(hw, "failed to restore promiscuous "
4143                                          "status after disable vlan filter "
4144                                          "failed during enabling promiscuous "
4145                                          "mode, ret = %d", ret);
4146                 }
4147         }
4148
4149         rte_spinlock_unlock(&hw->lock);
4150
4151         return ret;
4152 }
4153
4154 static int
4155 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
4156 {
4157         bool allmulti = dev->data->all_multicast ? true : false;
4158         struct hns3_adapter *hns = dev->data->dev_private;
4159         struct hns3_hw *hw = &hns->hw;
4160         uint64_t offloads;
4161         int err;
4162         int ret;
4163
4164         /* If now in all_multicast mode, must remain in all_multicast mode. */
4165         rte_spinlock_lock(&hw->lock);
4166         ret = hns3_set_promisc_mode(hw, false, allmulti);
4167         if (ret) {
4168                 rte_spinlock_unlock(&hw->lock);
4169                 hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
4170                          ret);
4171                 return ret;
4172         }
4173         /* when promiscuous mode was disabled, restore the vlan filter status */
4174         offloads = dev->data->dev_conf.rxmode.offloads;
4175         if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
4176                 ret = hns3_enable_vlan_filter(hns, true);
4177                 if (ret) {
4178                         hns3_err(hw, "failed to disable promiscuous mode due to"
4179                                  " failure to restore vlan filter, ret = %d",
4180                                  ret);
4181                         err = hns3_set_promisc_mode(hw, true, true);
4182                         if (err)
4183                                 hns3_err(hw, "failed to restore promiscuous "
4184                                          "status after enabling vlan filter "
4185                                          "failed during disabling promiscuous "
4186                                          "mode, ret = %d", ret);
4187                 }
4188         }
4189         rte_spinlock_unlock(&hw->lock);
4190
4191         return ret;
4192 }
4193
4194 static int
4195 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
4196 {
4197         struct hns3_adapter *hns = dev->data->dev_private;
4198         struct hns3_hw *hw = &hns->hw;
4199         int ret;
4200
4201         if (dev->data->promiscuous)
4202                 return 0;
4203
4204         rte_spinlock_lock(&hw->lock);
4205         ret = hns3_set_promisc_mode(hw, false, true);
4206         rte_spinlock_unlock(&hw->lock);
4207         if (ret)
4208                 hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
4209                          ret);
4210
4211         return ret;
4212 }
4213
4214 static int
4215 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
4216 {
4217         struct hns3_adapter *hns = dev->data->dev_private;
4218         struct hns3_hw *hw = &hns->hw;
4219         int ret;
4220
4221         /* If now in promiscuous mode, must remain in all_multicast mode. */
4222         if (dev->data->promiscuous)
4223                 return 0;
4224
4225         rte_spinlock_lock(&hw->lock);
4226         ret = hns3_set_promisc_mode(hw, false, false);
4227         rte_spinlock_unlock(&hw->lock);
4228         if (ret)
4229                 hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4230                          ret);
4231
4232         return ret;
4233 }
4234
4235 static int
4236 hns3_dev_promisc_restore(struct hns3_adapter *hns)
4237 {
4238         struct hns3_hw *hw = &hns->hw;
4239         bool allmulti = hw->data->all_multicast ? true : false;
4240         int ret;
4241
4242         if (hw->data->promiscuous) {
4243                 ret = hns3_set_promisc_mode(hw, true, true);
4244                 if (ret)
4245                         hns3_err(hw, "failed to restore promiscuous mode, "
4246                                  "ret = %d", ret);
4247                 return ret;
4248         }
4249
4250         ret = hns3_set_promisc_mode(hw, false, allmulti);
4251         if (ret)
4252                 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4253                          ret);
4254         return ret;
4255 }
4256
4257 static int
4258 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4259 {
4260         struct hns3_sfp_info_cmd *resp;
4261         struct hns3_cmd_desc desc;
4262         int ret;
4263
4264         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4265         resp = (struct hns3_sfp_info_cmd *)desc.data;
4266         resp->query_type = HNS3_ACTIVE_QUERY;
4267
4268         ret = hns3_cmd_send(hw, &desc, 1);
4269         if (ret == -EOPNOTSUPP) {
4270                 hns3_warn(hw, "firmware does not support get SFP info,"
4271                           " ret = %d.", ret);
4272                 return ret;
4273         } else if (ret) {
4274                 hns3_err(hw, "get sfp info failed, ret = %d.", ret);
4275                 return ret;
4276         }
4277
4278         /*
4279          * In some case, the speed of MAC obtained from firmware may be 0, it
4280          * shouldn't be set to mac->speed.
4281          */
4282         if (!rte_le_to_cpu_32(resp->sfp_speed))
4283                 return 0;
4284
4285         mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
4286         /*
4287          * if resp->supported_speed is 0, it means it's an old version
4288          * firmware, do not update these params.
4289          */
4290         if (resp->supported_speed) {
4291                 mac_info->query_type = HNS3_ACTIVE_QUERY;
4292                 mac_info->supported_speed =
4293                                         rte_le_to_cpu_32(resp->supported_speed);
4294                 mac_info->support_autoneg = resp->autoneg_ability;
4295                 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
4296                                         : RTE_ETH_LINK_AUTONEG;
4297         } else {
4298                 mac_info->query_type = HNS3_DEFAULT_QUERY;
4299         }
4300
4301         return 0;
4302 }
4303
4304 static uint8_t
4305 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4306 {
4307         if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
4308                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
4309
4310         return duplex;
4311 }
4312
4313 static int
4314 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4315 {
4316         struct hns3_mac *mac = &hw->mac;
4317         int ret;
4318
4319         duplex = hns3_check_speed_dup(duplex, speed);
4320         if (mac->link_speed == speed && mac->link_duplex == duplex)
4321                 return 0;
4322
4323         ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4324         if (ret)
4325                 return ret;
4326
4327         ret = hns3_port_shaper_update(hw, speed);
4328         if (ret)
4329                 return ret;
4330
4331         mac->link_speed = speed;
4332         mac->link_duplex = duplex;
4333
4334         return 0;
4335 }
4336
4337 static int
4338 hns3_update_fiber_link_info(struct hns3_hw *hw)
4339 {
4340         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4341         struct hns3_mac *mac = &hw->mac;
4342         struct hns3_mac mac_info;
4343         int ret;
4344
4345         /* If firmware do not support get SFP/qSFP speed, return directly */
4346         if (!pf->support_sfp_query)
4347                 return 0;
4348
4349         memset(&mac_info, 0, sizeof(struct hns3_mac));
4350         ret = hns3_get_sfp_info(hw, &mac_info);
4351         if (ret == -EOPNOTSUPP) {
4352                 pf->support_sfp_query = false;
4353                 return ret;
4354         } else if (ret)
4355                 return ret;
4356
4357         /* Do nothing if no SFP */
4358         if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
4359                 return 0;
4360
4361         /*
4362          * If query_type is HNS3_ACTIVE_QUERY, it is no need
4363          * to reconfigure the speed of MAC. Otherwise, it indicates
4364          * that the current firmware only supports to obtain the
4365          * speed of the SFP, and the speed of MAC needs to reconfigure.
4366          */
4367         mac->query_type = mac_info.query_type;
4368         if (mac->query_type == HNS3_ACTIVE_QUERY) {
4369                 if (mac_info.link_speed != mac->link_speed) {
4370                         ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4371                         if (ret)
4372                                 return ret;
4373                 }
4374
4375                 mac->link_speed = mac_info.link_speed;
4376                 mac->supported_speed = mac_info.supported_speed;
4377                 mac->support_autoneg = mac_info.support_autoneg;
4378                 mac->link_autoneg = mac_info.link_autoneg;
4379
4380                 return 0;
4381         }
4382
4383         /* Config full duplex for SFP */
4384         return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
4385                                       RTE_ETH_LINK_FULL_DUPLEX);
4386 }
4387
4388 static void
4389 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4390 {
4391 #define HNS3_PHY_SUPPORTED_SPEED_MASK   0x2f
4392
4393         struct hns3_phy_params_bd0_cmd *req;
4394         uint32_t supported;
4395
4396         req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4397         mac->link_speed = rte_le_to_cpu_32(req->speed);
4398         mac->link_duplex = hns3_get_bit(req->duplex,
4399                                            HNS3_PHY_DUPLEX_CFG_B);
4400         mac->link_autoneg = hns3_get_bit(req->autoneg,
4401                                            HNS3_PHY_AUTONEG_CFG_B);
4402         mac->advertising = rte_le_to_cpu_32(req->advertising);
4403         mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4404         supported = rte_le_to_cpu_32(req->supported);
4405         mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4406         mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4407 }
4408
4409 static int
4410 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4411 {
4412         struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4413         uint16_t i;
4414         int ret;
4415
4416         for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4417                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4418                                           true);
4419                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4420         }
4421         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4422
4423         ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4424         if (ret) {
4425                 hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4426                 return ret;
4427         }
4428
4429         hns3_parse_copper_phy_params(desc, mac);
4430
4431         return 0;
4432 }
4433
4434 static int
4435 hns3_update_copper_link_info(struct hns3_hw *hw)
4436 {
4437         struct hns3_mac *mac = &hw->mac;
4438         struct hns3_mac mac_info;
4439         int ret;
4440
4441         memset(&mac_info, 0, sizeof(struct hns3_mac));
4442         ret = hns3_get_copper_phy_params(hw, &mac_info);
4443         if (ret)
4444                 return ret;
4445
4446         if (mac_info.link_speed != mac->link_speed) {
4447                 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4448                 if (ret)
4449                         return ret;
4450         }
4451
4452         mac->link_speed = mac_info.link_speed;
4453         mac->link_duplex = mac_info.link_duplex;
4454         mac->link_autoneg = mac_info.link_autoneg;
4455         mac->supported_speed = mac_info.supported_speed;
4456         mac->advertising = mac_info.advertising;
4457         mac->lp_advertising = mac_info.lp_advertising;
4458         mac->support_autoneg = mac_info.support_autoneg;
4459
4460         return 0;
4461 }
4462
4463 static int
4464 hns3_update_link_info(struct rte_eth_dev *eth_dev)
4465 {
4466         struct hns3_adapter *hns = eth_dev->data->dev_private;
4467         struct hns3_hw *hw = &hns->hw;
4468         int ret = 0;
4469
4470         if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4471                 ret = hns3_update_copper_link_info(hw);
4472         else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
4473                 ret = hns3_update_fiber_link_info(hw);
4474
4475         return ret;
4476 }
4477
4478 static int
4479 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
4480 {
4481         struct hns3_config_mac_mode_cmd *req;
4482         struct hns3_cmd_desc desc;
4483         uint32_t loop_en = 0;
4484         uint8_t val = 0;
4485         int ret;
4486
4487         req = (struct hns3_config_mac_mode_cmd *)desc.data;
4488
4489         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
4490         if (enable)
4491                 val = 1;
4492         hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
4493         hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
4494         hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
4495         hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
4496         hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
4497         hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
4498         hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
4499         hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
4500         hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
4501         hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
4502
4503         /*
4504          * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
4505          * when receiving frames. Otherwise, CRC will be stripped.
4506          */
4507         if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
4508                 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
4509         else
4510                 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
4511         hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
4512         hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
4513         hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
4514         req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
4515
4516         ret = hns3_cmd_send(hw, &desc, 1);
4517         if (ret)
4518                 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
4519
4520         return ret;
4521 }
4522
4523 static int
4524 hns3_get_mac_link_status(struct hns3_hw *hw)
4525 {
4526         struct hns3_link_status_cmd *req;
4527         struct hns3_cmd_desc desc;
4528         int link_status;
4529         int ret;
4530
4531         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4532         ret = hns3_cmd_send(hw, &desc, 1);
4533         if (ret) {
4534                 hns3_err(hw, "get link status cmd failed %d", ret);
4535                 return RTE_ETH_LINK_DOWN;
4536         }
4537
4538         req = (struct hns3_link_status_cmd *)desc.data;
4539         link_status = req->status & HNS3_LINK_STATUS_UP_M;
4540
4541         return !!link_status;
4542 }
4543
4544 static bool
4545 hns3_update_link_status(struct hns3_hw *hw)
4546 {
4547         int state;
4548
4549         state = hns3_get_mac_link_status(hw);
4550         if (state != hw->mac.link_status) {
4551                 hw->mac.link_status = state;
4552                 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
4553                 return true;
4554         }
4555
4556         return false;
4557 }
4558
4559 void
4560 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query)
4561 {
4562         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4563         struct rte_eth_link new_link;
4564         int ret;
4565
4566         if (query)
4567                 hns3_update_port_link_info(dev);
4568
4569         memset(&new_link, 0, sizeof(new_link));
4570         hns3_setup_linkstatus(dev, &new_link);
4571
4572         ret = rte_eth_linkstatus_set(dev, &new_link);
4573         if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
4574                 hns3_start_report_lse(dev);
4575 }
4576
4577 static void
4578 hns3_service_handler(void *param)
4579 {
4580         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4581         struct hns3_adapter *hns = eth_dev->data->dev_private;
4582         struct hns3_hw *hw = &hns->hw;
4583
4584         if (!hns3_is_reset_pending(hns))
4585                 hns3_update_linkstatus_and_event(hw, true);
4586         else
4587                 hns3_warn(hw, "Cancel the query when reset is pending");
4588
4589         rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4590 }
4591
4592 static int
4593 hns3_init_hardware(struct hns3_adapter *hns)
4594 {
4595         struct hns3_hw *hw = &hns->hw;
4596         int ret;
4597
4598         ret = hns3_map_tqp(hw);
4599         if (ret) {
4600                 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
4601                 return ret;
4602         }
4603
4604         ret = hns3_init_umv_space(hw);
4605         if (ret) {
4606                 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
4607                 return ret;
4608         }
4609
4610         ret = hns3_mac_init(hw);
4611         if (ret) {
4612                 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
4613                 goto err_mac_init;
4614         }
4615
4616         ret = hns3_init_mgr_tbl(hw);
4617         if (ret) {
4618                 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
4619                 goto err_mac_init;
4620         }
4621
4622         ret = hns3_promisc_init(hw);
4623         if (ret) {
4624                 PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
4625                              ret);
4626                 goto err_mac_init;
4627         }
4628
4629         ret = hns3_init_vlan_config(hns);
4630         if (ret) {
4631                 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
4632                 goto err_mac_init;
4633         }
4634
4635         ret = hns3_dcb_init(hw);
4636         if (ret) {
4637                 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
4638                 goto err_mac_init;
4639         }
4640
4641         ret = hns3_init_fd_config(hns);
4642         if (ret) {
4643                 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
4644                 goto err_mac_init;
4645         }
4646
4647         ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
4648         if (ret) {
4649                 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
4650                 goto err_mac_init;
4651         }
4652
4653         ret = hns3_config_gro(hw, false);
4654         if (ret) {
4655                 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
4656                 goto err_mac_init;
4657         }
4658
4659         /*
4660          * In the initialization clearing the all hardware mapping relationship
4661          * configurations between queues and interrupt vectors is needed, so
4662          * some error caused by the residual configurations, such as the
4663          * unexpected interrupt, can be avoid.
4664          */
4665         ret = hns3_init_ring_with_vector(hw);
4666         if (ret) {
4667                 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
4668                 goto err_mac_init;
4669         }
4670
4671         return 0;
4672
4673 err_mac_init:
4674         hns3_uninit_umv_space(hw);
4675         return ret;
4676 }
4677
4678 static int
4679 hns3_clear_hw(struct hns3_hw *hw)
4680 {
4681         struct hns3_cmd_desc desc;
4682         int ret;
4683
4684         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4685
4686         ret = hns3_cmd_send(hw, &desc, 1);
4687         if (ret && ret != -EOPNOTSUPP)
4688                 return ret;
4689
4690         return 0;
4691 }
4692
4693 static void
4694 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
4695 {
4696         uint32_t val;
4697
4698         /*
4699          * The new firmware support report more hardware error types by
4700          * msix mode. These errors are defined as RAS errors in hardware
4701          * and belong to a different type from the MSI-x errors processed
4702          * by the network driver.
4703          *
4704          * Network driver should open the new error report on initialization.
4705          */
4706         val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4707         hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4708         hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
4709 }
4710
4711 static uint32_t
4712 hns3_set_firber_default_support_speed(struct hns3_hw *hw)
4713 {
4714         struct hns3_mac *mac = &hw->mac;
4715
4716         switch (mac->link_speed) {
4717         case RTE_ETH_SPEED_NUM_1G:
4718                 return HNS3_FIBER_LINK_SPEED_1G_BIT;
4719         case RTE_ETH_SPEED_NUM_10G:
4720                 return HNS3_FIBER_LINK_SPEED_10G_BIT;
4721         case RTE_ETH_SPEED_NUM_25G:
4722                 return HNS3_FIBER_LINK_SPEED_25G_BIT;
4723         case RTE_ETH_SPEED_NUM_40G:
4724                 return HNS3_FIBER_LINK_SPEED_40G_BIT;
4725         case RTE_ETH_SPEED_NUM_50G:
4726                 return HNS3_FIBER_LINK_SPEED_50G_BIT;
4727         case RTE_ETH_SPEED_NUM_100G:
4728                 return HNS3_FIBER_LINK_SPEED_100G_BIT;
4729         case RTE_ETH_SPEED_NUM_200G:
4730                 return HNS3_FIBER_LINK_SPEED_200G_BIT;
4731         default:
4732                 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
4733                 return 0;
4734         }
4735 }
4736
4737 /*
4738  * Validity of supported_speed for firber and copper media type can be
4739  * guaranteed by the following policy:
4740  * Copper:
4741  *       Although the initialization of the phy in the firmware may not be
4742  *       completed, the firmware can guarantees that the supported_speed is
4743  *       an valid value.
4744  * Firber:
4745  *       If the version of firmware supports the acitive query way of the
4746  *       HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
4747  *       through it. If unsupported, use the SFP's speed as the value of the
4748  *       supported_speed.
4749  */
4750 static int
4751 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
4752 {
4753         struct hns3_adapter *hns = eth_dev->data->dev_private;
4754         struct hns3_hw *hw = &hns->hw;
4755         struct hns3_mac *mac = &hw->mac;
4756         int ret;
4757
4758         ret = hns3_update_link_info(eth_dev);
4759         if (ret)
4760                 return ret;
4761
4762         if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
4763                 /*
4764                  * Some firmware does not support the report of supported_speed,
4765                  * and only report the effective speed of SFP. In this case, it
4766                  * is necessary to use the SFP's speed as the supported_speed.
4767                  */
4768                 if (mac->supported_speed == 0)
4769                         mac->supported_speed =
4770                                 hns3_set_firber_default_support_speed(hw);
4771         }
4772
4773         return 0;
4774 }
4775
4776 static void
4777 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
4778 {
4779         struct hns3_mac *mac = &hns->hw.mac;
4780
4781         if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
4782                 hns->pf.support_fc_autoneg = true;
4783                 return;
4784         }
4785
4786         /*
4787          * Flow control auto-negotiation requires the cooperation of the driver
4788          * and firmware. Currently, the optical port does not support flow
4789          * control auto-negotiation.
4790          */
4791         hns->pf.support_fc_autoneg = false;
4792 }
4793
4794 static int
4795 hns3_init_pf(struct rte_eth_dev *eth_dev)
4796 {
4797         struct rte_device *dev = eth_dev->device;
4798         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4799         struct hns3_adapter *hns = eth_dev->data->dev_private;
4800         struct hns3_hw *hw = &hns->hw;
4801         int ret;
4802
4803         PMD_INIT_FUNC_TRACE();
4804
4805         /* Get hardware io base address from pcie BAR2 IO space */
4806         hw->io_base = pci_dev->mem_resource[2].addr;
4807
4808         /* Firmware command queue initialize */
4809         ret = hns3_cmd_init_queue(hw);
4810         if (ret) {
4811                 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
4812                 goto err_cmd_init_queue;
4813         }
4814
4815         hns3_clear_all_event_cause(hw);
4816
4817         /* Firmware command initialize */
4818         ret = hns3_cmd_init(hw);
4819         if (ret) {
4820                 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
4821                 goto err_cmd_init;
4822         }
4823
4824         hns3_tx_push_init(eth_dev);
4825
4826         /*
4827          * To ensure that the hardware environment is clean during
4828          * initialization, the driver actively clear the hardware environment
4829          * during initialization, including PF and corresponding VFs' vlan, mac,
4830          * flow table configurations, etc.
4831          */
4832         ret = hns3_clear_hw(hw);
4833         if (ret) {
4834                 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
4835                 goto err_cmd_init;
4836         }
4837
4838         /* Hardware statistics of imissed registers cleared. */
4839         ret = hns3_update_imissed_stats(hw, true);
4840         if (ret) {
4841                 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
4842                 goto err_cmd_init;
4843         }
4844
4845         hns3_config_all_msix_error(hw, true);
4846
4847         ret = rte_intr_callback_register(pci_dev->intr_handle,
4848                                          hns3_interrupt_handler,
4849                                          eth_dev);
4850         if (ret) {
4851                 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
4852                 goto err_intr_callback_register;
4853         }
4854
4855         ret = hns3_ptp_init(hw);
4856         if (ret)
4857                 goto err_get_config;
4858
4859         /* Enable interrupt */
4860         rte_intr_enable(pci_dev->intr_handle);
4861         hns3_pf_enable_irq0(hw);
4862
4863         /* Get configuration */
4864         ret = hns3_get_configuration(hw);
4865         if (ret) {
4866                 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
4867                 goto err_get_config;
4868         }
4869
4870         ret = hns3_tqp_stats_init(hw);
4871         if (ret)
4872                 goto err_get_config;
4873
4874         ret = hns3_init_hardware(hns);
4875         if (ret) {
4876                 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
4877                 goto err_init_hw;
4878         }
4879
4880         /* Initialize flow director filter list & hash */
4881         ret = hns3_fdir_filter_init(hns);
4882         if (ret) {
4883                 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
4884                 goto err_fdir;
4885         }
4886
4887         hns3_rss_set_default_args(hw);
4888
4889         ret = hns3_enable_hw_error_intr(hns, true);
4890         if (ret) {
4891                 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
4892                              ret);
4893                 goto err_enable_intr;
4894         }
4895
4896         ret = hns3_get_port_supported_speed(eth_dev);
4897         if (ret) {
4898                 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
4899                              "by device, ret = %d.", ret);
4900                 goto err_supported_speed;
4901         }
4902
4903         hns3_get_fc_autoneg_capability(hns);
4904
4905         hns3_tm_conf_init(eth_dev);
4906
4907         return 0;
4908
4909 err_supported_speed:
4910         (void)hns3_enable_hw_error_intr(hns, false);
4911 err_enable_intr:
4912         hns3_fdir_filter_uninit(hns);
4913 err_fdir:
4914         hns3_uninit_umv_space(hw);
4915 err_init_hw:
4916         hns3_tqp_stats_uninit(hw);
4917 err_get_config:
4918         hns3_pf_disable_irq0(hw);
4919         rte_intr_disable(pci_dev->intr_handle);
4920         hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4921                              eth_dev);
4922 err_intr_callback_register:
4923 err_cmd_init:
4924         hns3_cmd_uninit(hw);
4925         hns3_cmd_destroy_queue(hw);
4926 err_cmd_init_queue:
4927         hw->io_base = NULL;
4928
4929         return ret;
4930 }
4931
4932 static void
4933 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
4934 {
4935         struct hns3_adapter *hns = eth_dev->data->dev_private;
4936         struct rte_device *dev = eth_dev->device;
4937         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4938         struct hns3_hw *hw = &hns->hw;
4939
4940         PMD_INIT_FUNC_TRACE();
4941
4942         hns3_tm_conf_uninit(eth_dev);
4943         hns3_enable_hw_error_intr(hns, false);
4944         hns3_rss_uninit(hns);
4945         (void)hns3_config_gro(hw, false);
4946         hns3_promisc_uninit(hw);
4947         hns3_flow_uninit(eth_dev);
4948         hns3_fdir_filter_uninit(hns);
4949         hns3_uninit_umv_space(hw);
4950         hns3_tqp_stats_uninit(hw);
4951         hns3_config_mac_tnl_int(hw, false);
4952         hns3_pf_disable_irq0(hw);
4953         rte_intr_disable(pci_dev->intr_handle);
4954         hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4955                              eth_dev);
4956         hns3_config_all_msix_error(hw, false);
4957         hns3_cmd_uninit(hw);
4958         hns3_cmd_destroy_queue(hw);
4959         hw->io_base = NULL;
4960 }
4961
4962 static uint32_t
4963 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
4964 {
4965         uint32_t speed_bit;
4966
4967         switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4968         case RTE_ETH_LINK_SPEED_10M:
4969                 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
4970                 break;
4971         case RTE_ETH_LINK_SPEED_10M_HD:
4972                 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
4973                 break;
4974         case RTE_ETH_LINK_SPEED_100M:
4975                 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
4976                 break;
4977         case RTE_ETH_LINK_SPEED_100M_HD:
4978                 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
4979                 break;
4980         case RTE_ETH_LINK_SPEED_1G:
4981                 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
4982                 break;
4983         default:
4984                 speed_bit = 0;
4985                 break;
4986         }
4987
4988         return speed_bit;
4989 }
4990
4991 static uint32_t
4992 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
4993 {
4994         uint32_t speed_bit;
4995
4996         switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4997         case RTE_ETH_LINK_SPEED_1G:
4998                 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
4999                 break;
5000         case RTE_ETH_LINK_SPEED_10G:
5001                 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
5002                 break;
5003         case RTE_ETH_LINK_SPEED_25G:
5004                 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
5005                 break;
5006         case RTE_ETH_LINK_SPEED_40G:
5007                 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
5008                 break;
5009         case RTE_ETH_LINK_SPEED_50G:
5010                 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
5011                 break;
5012         case RTE_ETH_LINK_SPEED_100G:
5013                 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
5014                 break;
5015         case RTE_ETH_LINK_SPEED_200G:
5016                 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
5017                 break;
5018         default:
5019                 speed_bit = 0;
5020                 break;
5021         }
5022
5023         return speed_bit;
5024 }
5025
5026 static int
5027 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
5028 {
5029         struct hns3_mac *mac = &hw->mac;
5030         uint32_t supported_speed = mac->supported_speed;
5031         uint32_t speed_bit = 0;
5032
5033         if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
5034                 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
5035         else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
5036                 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
5037
5038         if (!(speed_bit & supported_speed)) {
5039                 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.",
5040                          link_speeds);
5041                 return -EINVAL;
5042         }
5043
5044         return 0;
5045 }
5046
5047 static inline uint32_t
5048 hns3_get_link_speed(uint32_t link_speeds)
5049 {
5050         uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
5051
5052         if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
5053             link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
5054                 speed = RTE_ETH_SPEED_NUM_10M;
5055         if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
5056             link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
5057                 speed = RTE_ETH_SPEED_NUM_100M;
5058         if (link_speeds & RTE_ETH_LINK_SPEED_1G)
5059                 speed = RTE_ETH_SPEED_NUM_1G;
5060         if (link_speeds & RTE_ETH_LINK_SPEED_10G)
5061                 speed = RTE_ETH_SPEED_NUM_10G;
5062         if (link_speeds & RTE_ETH_LINK_SPEED_25G)
5063                 speed = RTE_ETH_SPEED_NUM_25G;
5064         if (link_speeds & RTE_ETH_LINK_SPEED_40G)
5065                 speed = RTE_ETH_SPEED_NUM_40G;
5066         if (link_speeds & RTE_ETH_LINK_SPEED_50G)
5067                 speed = RTE_ETH_SPEED_NUM_50G;
5068         if (link_speeds & RTE_ETH_LINK_SPEED_100G)
5069                 speed = RTE_ETH_SPEED_NUM_100G;
5070         if (link_speeds & RTE_ETH_LINK_SPEED_200G)
5071                 speed = RTE_ETH_SPEED_NUM_200G;
5072
5073         return speed;
5074 }
5075
5076 static uint8_t
5077 hns3_get_link_duplex(uint32_t link_speeds)
5078 {
5079         if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
5080             (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
5081                 return RTE_ETH_LINK_HALF_DUPLEX;
5082         else
5083                 return RTE_ETH_LINK_FULL_DUPLEX;
5084 }
5085
5086 static int
5087 hns3_set_copper_port_link_speed(struct hns3_hw *hw,
5088                                 struct hns3_set_link_speed_cfg *cfg)
5089 {
5090         struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
5091         struct hns3_phy_params_bd0_cmd *req;
5092         uint16_t i;
5093
5094         for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
5095                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
5096                                           false);
5097                 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
5098         }
5099         hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
5100         req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
5101         req->autoneg = cfg->autoneg;
5102
5103         /*
5104          * The full speed capability is used to negotiate when
5105          * auto-negotiation is enabled.
5106          */
5107         if (cfg->autoneg) {
5108                 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
5109                                     HNS3_PHY_LINK_SPEED_10M_HD_BIT |
5110                                     HNS3_PHY_LINK_SPEED_100M_BIT |
5111                                     HNS3_PHY_LINK_SPEED_100M_HD_BIT |
5112                                     HNS3_PHY_LINK_SPEED_1000M_BIT;
5113         } else {
5114                 req->speed = cfg->speed;
5115                 req->duplex = cfg->duplex;
5116         }
5117
5118         return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
5119 }
5120
5121 static int
5122 hns3_set_autoneg(struct hns3_hw *hw, bool enable)
5123 {
5124         struct hns3_config_auto_neg_cmd *req;
5125         struct hns3_cmd_desc desc;
5126         uint32_t flag = 0;
5127         int ret;
5128
5129         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);
5130
5131         req = (struct hns3_config_auto_neg_cmd *)desc.data;
5132         if (enable)
5133                 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
5134         req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);
5135
5136         ret = hns3_cmd_send(hw, &desc, 1);
5137         if (ret)
5138                 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);
5139
5140         return ret;
5141 }
5142
5143 static int
5144 hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
5145                                struct hns3_set_link_speed_cfg *cfg)
5146 {
5147         int ret;
5148
5149         if (hw->mac.support_autoneg) {
5150                 ret = hns3_set_autoneg(hw, cfg->autoneg);
5151                 if (ret) {
5152                         hns3_err(hw, "failed to configure auto-negotiation.");
5153                         return ret;
5154                 }
5155
5156                 /*
5157                  * To enable auto-negotiation, we only need to open the switch
5158                  * of auto-negotiation, then firmware sets all speed
5159                  * capabilities.
5160                  */
5161                 if (cfg->autoneg)
5162                         return 0;
5163         }
5164
5165         /*
5166          * Some hardware doesn't support auto-negotiation, but users may not
5167          * configure link_speeds (default 0), which means auto-negotiation.
5168          * In this case, a warning message need to be printed, instead of
5169          * an error.
5170          */
5171         if (cfg->autoneg) {
5172                 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
5173                 return 0;
5174         }
5175
5176         return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
5177 }
5178
5179 static int
5180 hns3_set_port_link_speed(struct hns3_hw *hw,
5181                          struct hns3_set_link_speed_cfg *cfg)
5182 {
5183         int ret;
5184
5185         if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
5186 #if defined(RTE_HNS3_ONLY_1630_FPGA)
5187                 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5188                 if (pf->is_tmp_phy)
5189                         return 0;
5190 #endif
5191
5192                 ret = hns3_set_copper_port_link_speed(hw, cfg);
5193                 if (ret) {
5194                         hns3_err(hw, "failed to set copper port link speed,"
5195                                  "ret = %d.", ret);
5196                         return ret;
5197                 }
5198         } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
5199                 ret = hns3_set_fiber_port_link_speed(hw, cfg);
5200                 if (ret) {
5201                         hns3_err(hw, "failed to set fiber port link speed,"
5202                                  "ret = %d.", ret);
5203                         return ret;
5204                 }
5205         }
5206
5207         return 0;
5208 }
5209
5210 static int
5211 hns3_apply_link_speed(struct hns3_hw *hw)
5212 {
5213         struct rte_eth_conf *conf = &hw->data->dev_conf;
5214         struct hns3_set_link_speed_cfg cfg;
5215
5216         memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
5217         cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
5218                         RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
5219         if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
5220                 cfg.speed = hns3_get_link_speed(conf->link_speeds);
5221                 cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
5222         }
5223
5224         return hns3_set_port_link_speed(hw, &cfg);
5225 }
5226
5227 static int
5228 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
5229 {
5230         struct hns3_hw *hw = &hns->hw;
5231         bool link_en;
5232         int ret;
5233
5234         ret = hns3_update_queue_map_configure(hns);
5235         if (ret) {
5236                 hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
5237                          ret);
5238                 return ret;
5239         }
5240
5241         /* Note: hns3_tm_conf_update must be called after configuring DCB. */
5242         ret = hns3_tm_conf_update(hw);
5243         if (ret) {
5244                 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
5245                 return ret;
5246         }
5247
5248         hns3_enable_rxd_adv_layout(hw);
5249
5250         ret = hns3_init_queues(hns, reset_queue);
5251         if (ret) {
5252                 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
5253                 return ret;
5254         }
5255
5256         link_en = hw->set_link_down ? false : true;
5257         ret = hns3_cfg_mac_mode(hw, link_en);
5258         if (ret) {
5259                 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
5260                 goto err_config_mac_mode;
5261         }
5262
5263         ret = hns3_apply_link_speed(hw);
5264         if (ret)
5265                 goto err_set_link_speed;
5266
5267         return 0;
5268
5269 err_set_link_speed:
5270         (void)hns3_cfg_mac_mode(hw, false);
5271
5272 err_config_mac_mode:
5273         hns3_dev_release_mbufs(hns);
5274         /*
5275          * Here is exception handling, hns3_reset_all_tqps will have the
5276          * corresponding error message if it is handled incorrectly, so it is
5277          * not necessary to check hns3_reset_all_tqps return value, here keep
5278          * ret as the error code causing the exception.
5279          */
5280         (void)hns3_reset_all_tqps(hns);
5281         return ret;
5282 }
5283
5284 static int
5285 hns3_map_rx_interrupt(struct rte_eth_dev *dev)
5286 {
5287         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5288         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5289         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5290         uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
5291         uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
5292         uint32_t intr_vector;
5293         uint16_t q_id;
5294         int ret;
5295
5296         /*
5297          * hns3 needs a separate interrupt to be used as event interrupt which
5298          * could not be shared with task queue pair, so KERNEL drivers need
5299          * support multiple interrupt vectors.
5300          */
5301         if (dev->data->dev_conf.intr_conf.rxq == 0 ||
5302             !rte_intr_cap_multiple(intr_handle))
5303                 return 0;
5304
5305         rte_intr_disable(intr_handle);
5306         intr_vector = hw->used_rx_queues;
5307         /* creates event fd for each intr vector when MSIX is used */
5308         if (rte_intr_efd_enable(intr_handle, intr_vector))
5309                 return -EINVAL;
5310
5311         /* Allocate vector list */
5312         if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
5313                                     hw->used_rx_queues)) {
5314                 hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
5315                          hw->used_rx_queues);
5316                 ret = -ENOMEM;
5317                 goto alloc_intr_vec_error;
5318         }
5319
5320         if (rte_intr_allow_others(intr_handle)) {
5321                 vec = RTE_INTR_VEC_RXTX_OFFSET;
5322                 base = RTE_INTR_VEC_RXTX_OFFSET;
5323         }
5324
5325         for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
5326                 ret = hns3_bind_ring_with_vector(hw, vec, true,
5327                                                  HNS3_RING_TYPE_RX, q_id);
5328                 if (ret)
5329                         goto bind_vector_error;
5330
5331                 if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
5332                         goto bind_vector_error;
5333                 /*
5334                  * If there are not enough efds (e.g. not enough interrupt),
5335                  * remaining queues will be bond to the last interrupt.
5336                  */
5337                 if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
5338                         vec++;
5339         }
5340         rte_intr_enable(intr_handle);
5341         return 0;
5342
5343 bind_vector_error:
5344         rte_intr_vec_list_free(intr_handle);
5345 alloc_intr_vec_error:
5346         rte_intr_efd_disable(intr_handle);
5347         return ret;
5348 }
5349
5350 static int
5351 hns3_restore_rx_interrupt(struct hns3_hw *hw)
5352 {
5353         struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
5354         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5355         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5356         uint16_t q_id;
5357         int ret;
5358
5359         if (dev->data->dev_conf.intr_conf.rxq == 0)
5360                 return 0;
5361
5362         if (rte_intr_dp_is_en(intr_handle)) {
5363                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
5364                         ret = hns3_bind_ring_with_vector(hw,
5365                                 rte_intr_vec_list_index_get(intr_handle,
5366                                                                    q_id),
5367                                 true, HNS3_RING_TYPE_RX, q_id);
5368                         if (ret)
5369                                 return ret;
5370                 }
5371         }
5372
5373         return 0;
5374 }
5375
5376 static void
5377 hns3_restore_filter(struct rte_eth_dev *dev)
5378 {
5379         hns3_restore_rss_filter(dev);
5380 }
5381
5382 static int
5383 hns3_dev_start(struct rte_eth_dev *dev)
5384 {
5385         struct hns3_adapter *hns = dev->data->dev_private;
5386         struct hns3_hw *hw = &hns->hw;
5387         bool old_state = hw->set_link_down;
5388         int ret;
5389
5390         PMD_INIT_FUNC_TRACE();
5391         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
5392                 return -EBUSY;
5393
5394         rte_spinlock_lock(&hw->lock);
5395         hw->adapter_state = HNS3_NIC_STARTING;
5396
5397         /*
5398          * If the dev_set_link_down() API has been called, the "set_link_down"
5399          * flag can be cleared by dev_start() API. In addition, the flag should
5400          * also be cleared before calling hns3_do_start() so that MAC can be
5401          * enabled in dev_start stage.
5402          */
5403         hw->set_link_down = false;
5404         ret = hns3_do_start(hns, true);
5405         if (ret)
5406                 goto do_start_fail;
5407
5408         ret = hns3_map_rx_interrupt(dev);
5409         if (ret)
5410                 goto map_rx_inter_err;
5411
5412         /*
5413          * There are three register used to control the status of a TQP
5414          * (contains a pair of Tx queue and Rx queue) in the new version network
5415          * engine. One is used to control the enabling of Tx queue, the other is
5416          * used to control the enabling of Rx queue, and the last is the master
5417          * switch used to control the enabling of the tqp. The Tx register and
5418          * TQP register must be enabled at the same time to enable a Tx queue.
5419          * The same applies to the Rx queue. For the older network engine, this
5420          * function only refresh the enabled flag, and it is used to update the
5421          * status of queue in the dpdk framework.
5422          */
5423         ret = hns3_start_all_txqs(dev);
5424         if (ret)
5425                 goto map_rx_inter_err;
5426
5427         ret = hns3_start_all_rxqs(dev);
5428         if (ret)
5429                 goto start_all_rxqs_fail;
5430
5431         hw->adapter_state = HNS3_NIC_STARTED;
5432         rte_spinlock_unlock(&hw->lock);
5433
5434         hns3_rx_scattered_calc(dev);
5435         hns3_set_rxtx_function(dev);
5436         hns3_mp_req_start_rxtx(dev);
5437
5438         hns3_restore_filter(dev);
5439
5440         /* Enable interrupt of all rx queues before enabling queues */
5441         hns3_dev_all_rx_queue_intr_enable(hw, true);
5442
5443         /*
5444          * After finished the initialization, enable tqps to receive/transmit
5445          * packets and refresh all queue status.
5446          */
5447         hns3_start_tqps(hw);
5448
5449         hns3_tm_dev_start_proc(hw);
5450
5451         if (dev->data->dev_conf.intr_conf.lsc != 0)
5452                 hns3_dev_link_update(dev, 0);
5453         rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
5454
5455         hns3_info(hw, "hns3 dev start successful!");
5456
5457         return 0;
5458
5459 start_all_rxqs_fail:
5460         hns3_stop_all_txqs(dev);
5461 map_rx_inter_err:
5462         (void)hns3_do_stop(hns);
5463 do_start_fail:
5464         hw->set_link_down = old_state;
5465         hw->adapter_state = HNS3_NIC_CONFIGURED;
5466         rte_spinlock_unlock(&hw->lock);
5467
5468         return ret;
5469 }
5470
5471 static int
5472 hns3_do_stop(struct hns3_adapter *hns)
5473 {
5474         struct hns3_hw *hw = &hns->hw;
5475         int ret;
5476
5477         /*
5478          * The "hns3_do_stop" function will also be called by .stop_service to
5479          * prepare reset. At the time of global or IMP reset, the command cannot
5480          * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
5481          * accessed during the reset process. So the mbuf can not be released
5482          * during reset and is required to be released after the reset is
5483          * completed.
5484          */
5485         if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
5486                 hns3_dev_release_mbufs(hns);
5487
5488         ret = hns3_cfg_mac_mode(hw, false);
5489         if (ret)
5490                 return ret;
5491         hw->mac.link_status = RTE_ETH_LINK_DOWN;
5492
5493         if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
5494                 hns3_configure_all_mac_addr(hns, true);
5495                 ret = hns3_reset_all_tqps(hns);
5496                 if (ret) {
5497                         hns3_err(hw, "failed to reset all queues ret = %d.",
5498                                  ret);
5499                         return ret;
5500                 }
5501         }
5502
5503         return 0;
5504 }
5505
5506 static void
5507 hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
5508 {
5509         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5510         struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5511         struct hns3_adapter *hns = dev->data->dev_private;
5512         struct hns3_hw *hw = &hns->hw;
5513         uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
5514         uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
5515         uint16_t q_id;
5516
5517         if (dev->data->dev_conf.intr_conf.rxq == 0)
5518                 return;
5519
5520         /* unmap the ring with vector */
5521         if (rte_intr_allow_others(intr_handle)) {
5522                 vec = RTE_INTR_VEC_RXTX_OFFSET;
5523                 base = RTE_INTR_VEC_RXTX_OFFSET;
5524         }
5525         if (rte_intr_dp_is_en(intr_handle)) {
5526                 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
5527                         (void)hns3_bind_ring_with_vector(hw, vec, false,
5528                                                          HNS3_RING_TYPE_RX,
5529                                                          q_id);
5530                         if (vec < base + rte_intr_nb_efd_get(intr_handle)
5531                                                                         - 1)
5532                                 vec++;
5533                 }
5534         }
5535         /* Clean datapath event and queue/vec mapping */
5536         rte_intr_efd_disable(intr_handle);
5537         rte_intr_vec_list_free(intr_handle);
5538 }
5539
5540 static int
5541 hns3_dev_stop(struct rte_eth_dev *dev)
5542 {
5543         struct hns3_adapter *hns = dev->data->dev_private;
5544         struct hns3_hw *hw = &hns->hw;
5545
5546         PMD_INIT_FUNC_TRACE();
5547         dev->data->dev_started = 0;
5548
5549         hw->adapter_state = HNS3_NIC_STOPPING;
5550         hns3_set_rxtx_function(dev);
5551         rte_wmb();
5552         /* Disable datapath on secondary process. */
5553         hns3_mp_req_stop_rxtx(dev);
5554         /* Prevent crashes when queues are still in use. */
5555         rte_delay_ms(hw->cfg_max_queues);
5556
5557         rte_spinlock_lock(&hw->lock);
5558         if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
5559                 hns3_tm_dev_stop_proc(hw);
5560                 hns3_config_mac_tnl_int(hw, false);
5561                 hns3_stop_tqps(hw);
5562                 hns3_do_stop(hns);
5563                 hns3_unmap_rx_interrupt(dev);
5564                 hw->adapter_state = HNS3_NIC_CONFIGURED;
5565         }
5566         hns3_rx_scattered_reset(dev);
5567         rte_eal_alarm_cancel(hns3_service_handler, dev);
5568         hns3_stop_report_lse(dev);
5569         rte_spinlock_unlock(&hw->lock);
5570
5571         return 0;
5572 }
5573
5574 static int
5575 hns3_dev_close(struct rte_eth_dev *eth_dev)
5576 {
5577         struct hns3_adapter *hns = eth_dev->data->dev_private;
5578         struct hns3_hw *hw = &hns->hw;
5579         int ret = 0;
5580
5581         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
5582                 hns3_mp_uninit(eth_dev);
5583                 return 0;
5584         }
5585
5586         if (hw->adapter_state == HNS3_NIC_STARTED)
5587                 ret = hns3_dev_stop(eth_dev);
5588
5589         hw->adapter_state = HNS3_NIC_CLOSING;
5590         hns3_reset_abort(hns);
5591         hw->adapter_state = HNS3_NIC_CLOSED;
5592
5593         hns3_configure_all_mc_mac_addr(hns, true);
5594         hns3_remove_all_vlan_table(hns);
5595         hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
5596         hns3_uninit_pf(eth_dev);
5597         hns3_free_all_queues(eth_dev);
5598         rte_free(hw->reset.wait_data);
5599         hns3_mp_uninit(eth_dev);
5600         hns3_warn(hw, "Close port %u finished", hw->data->port_id);
5601
5602         return ret;
5603 }
5604
5605 static void
5606 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
5607                                    bool *tx_pause)
5608 {
5609         struct hns3_mac *mac = &hw->mac;
5610         uint32_t advertising = mac->advertising;
5611         uint32_t lp_advertising = mac->lp_advertising;
5612         *rx_pause = false;
5613         *tx_pause = false;
5614
5615         if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
5616                 *rx_pause = true;
5617                 *tx_pause = true;
5618         } else if (advertising & lp_advertising &
5619                    HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
5620                 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5621                         *rx_pause = true;
5622                 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5623                         *tx_pause = true;
5624         }
5625 }
5626
5627 static enum hns3_fc_mode
5628 hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
5629 {
5630         enum hns3_fc_mode current_mode;
5631         bool rx_pause = false;
5632         bool tx_pause = false;
5633
5634         switch (hw->mac.media_type) {
5635         case HNS3_MEDIA_TYPE_COPPER:
5636                 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
5637                 break;
5638
5639         /*
5640          * Flow control auto-negotiation is not supported for fiber and
5641          * backpalne media type.
5642          */
5643         case HNS3_MEDIA_TYPE_FIBER:
5644         case HNS3_MEDIA_TYPE_BACKPLANE:
5645                 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
5646                 current_mode = hw->requested_fc_mode;
5647                 goto out;
5648         default:
5649                 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
5650                          hw->mac.media_type);
5651                 current_mode = HNS3_FC_NONE;
5652                 goto out;
5653         }
5654
5655         if (rx_pause && tx_pause)
5656                 current_mode = HNS3_FC_FULL;
5657         else if (rx_pause)
5658                 current_mode = HNS3_FC_RX_PAUSE;
5659         else if (tx_pause)
5660                 current_mode = HNS3_FC_TX_PAUSE;
5661         else
5662                 current_mode = HNS3_FC_NONE;
5663
5664 out:
5665         return current_mode;
5666 }
5667
5668 static enum hns3_fc_mode
5669 hns3_get_current_fc_mode(struct rte_eth_dev *dev)
5670 {
5671         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5672         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5673         struct hns3_mac *mac = &hw->mac;
5674
5675         /*
5676          * When the flow control mode is obtained, the device may not complete
5677          * auto-negotiation. It is necessary to wait for link establishment.
5678          */
5679         (void)hns3_dev_link_update(dev, 1);
5680
5681         /*
5682          * If the link auto-negotiation of the nic is disabled, or the flow
5683          * control auto-negotiation is not supported, the forced flow control
5684          * mode is used.
5685          */
5686         if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
5687                 return hw->requested_fc_mode;
5688
5689         return hns3_get_autoneg_fc_mode(hw);
5690 }
5691
5692 static int
5693 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5694 {
5695         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5696         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5697         enum hns3_fc_mode current_mode;
5698
5699         current_mode = hns3_get_current_fc_mode(dev);
5700         switch (current_mode) {
5701         case HNS3_FC_FULL:
5702                 fc_conf->mode = RTE_ETH_FC_FULL;
5703                 break;
5704         case HNS3_FC_TX_PAUSE:
5705                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
5706                 break;
5707         case HNS3_FC_RX_PAUSE:
5708                 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
5709                 break;
5710         case HNS3_FC_NONE:
5711         default:
5712                 fc_conf->mode = RTE_ETH_FC_NONE;
5713                 break;
5714         }
5715
5716         fc_conf->pause_time = pf->pause_time;
5717         fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
5718
5719         return 0;
5720 }
5721
5722 static int
5723 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
5724 {
5725         struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5726
5727         if (!pf->support_fc_autoneg) {
5728                 if (autoneg != 0) {
5729                         hns3_err(hw, "unsupported fc auto-negotiation setting.");
5730                         return -EOPNOTSUPP;
5731                 }
5732
5733                 /*
5734                  * Flow control auto-negotiation of the NIC is not supported,
5735                  * but other auto-negotiation features may be supported.
5736                  */
5737                 if (autoneg != hw->mac.link_autoneg) {
5738                         hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
5739                         return -EOPNOTSUPP;
5740                 }
5741
5742                 return 0;
5743         }
5744
5745         /*
5746          * If flow control auto-negotiation of the NIC is supported, all
5747          * auto-negotiation features are supported.
5748          */
5749         if (autoneg != hw->mac.link_autoneg) {
5750                 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
5751                 return -EOPNOTSUPP;
5752         }
5753
5754         return 0;
5755 }
5756
5757 static int
5758 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5759 {
5760         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5761         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5762         int ret;
5763
5764         if (fc_conf->high_water || fc_conf->low_water ||
5765             fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
5766                 hns3_err(hw, "Unsupported flow control settings specified, "
5767                          "high_water(%u), low_water(%u), send_xon(%u) and "
5768                          "mac_ctrl_frame_fwd(%u) must be set to '0'",
5769                          fc_conf->high_water, fc_conf->low_water,
5770                          fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
5771                 return -EINVAL;
5772         }
5773
5774         ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
5775         if (ret)
5776                 return ret;
5777
5778         if (!fc_conf->pause_time) {
5779                 hns3_err(hw, "Invalid pause time %u setting.",
5780                          fc_conf->pause_time);
5781                 return -EINVAL;
5782         }
5783
5784         if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5785             hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
5786                 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
5787                          "current_fc_status = %d", hw->current_fc_status);
5788                 return -EOPNOTSUPP;
5789         }
5790
5791         if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
5792                 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
5793                 return -EOPNOTSUPP;
5794         }
5795
5796         rte_spinlock_lock(&hw->lock);
5797         ret = hns3_fc_enable(dev, fc_conf);
5798         rte_spinlock_unlock(&hw->lock);
5799
5800         return ret;
5801 }
5802
5803 static int
5804 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
5805                             struct rte_eth_pfc_conf *pfc_conf)
5806 {
5807         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5808         int ret;
5809
5810         if (!hns3_dev_get_support(hw, DCB)) {
5811                 hns3_err(hw, "This port does not support dcb configurations.");
5812                 return -EOPNOTSUPP;
5813         }
5814
5815         if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
5816             pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
5817                 hns3_err(hw, "Unsupported flow control settings specified, "
5818                          "high_water(%u), low_water(%u), send_xon(%u) and "
5819                          "mac_ctrl_frame_fwd(%u) must be set to '0'",
5820                          pfc_conf->fc.high_water, pfc_conf->fc.low_water,
5821                          pfc_conf->fc.send_xon,
5822                          pfc_conf->fc.mac_ctrl_frame_fwd);
5823                 return -EINVAL;
5824         }
5825         if (pfc_conf->fc.autoneg) {
5826                 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
5827                 return -EINVAL;
5828         }
5829         if (pfc_conf->fc.pause_time == 0) {
5830                 hns3_err(hw, "Invalid pause time %u setting.",
5831                          pfc_conf->fc.pause_time);
5832                 return -EINVAL;
5833         }
5834
5835         if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5836             hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
5837                 hns3_err(hw, "MAC pause is enabled. Cannot set PFC."
5838                              "current_fc_status = %d", hw->current_fc_status);
5839                 return -EOPNOTSUPP;
5840         }
5841
5842         rte_spinlock_lock(&hw->lock);
5843         ret = hns3_dcb_pfc_enable(dev, pfc_conf);
5844         rte_spinlock_unlock(&hw->lock);
5845
5846         return ret;
5847 }
5848
5849 static int
5850 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
5851 {
5852         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5853         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5854         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
5855         int i;
5856
5857         rte_spinlock_lock(&hw->lock);
5858         if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
5859                 dcb_info->nb_tcs = pf->local_max_tc;
5860         else
5861                 dcb_info->nb_tcs = 1;
5862
5863         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
5864                 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
5865         for (i = 0; i < dcb_info->nb_tcs; i++)
5866                 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
5867
5868         for (i = 0; i < hw->num_tc; i++) {
5869                 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
5870                 dcb_info->tc_queue.tc_txq[0][i].base =
5871                                                 hw->tc_queue[i].tqp_offset;
5872                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
5873                 dcb_info->tc_queue.tc_txq[0][i].nb_queue =
5874                                                 hw->tc_queue[i].tqp_count;
5875         }
5876         rte_spinlock_unlock(&hw->lock);
5877
5878         return 0;
5879 }
5880
5881 static int
5882 hns3_reinit_dev(struct hns3_adapter *hns)
5883 {
5884         struct hns3_hw *hw = &hns->hw;
5885         int ret;
5886
5887         ret = hns3_cmd_init(hw);
5888         if (ret) {
5889                 hns3_err(hw, "Failed to init cmd: %d", ret);
5890                 return ret;
5891         }
5892
5893         ret = hns3_reset_all_tqps(hns);
5894         if (ret) {
5895                 hns3_err(hw, "Failed to reset all queues: %d", ret);
5896                 return ret;
5897         }
5898
5899         ret = hns3_init_hardware(hns);
5900         if (ret) {
5901                 hns3_err(hw, "Failed to init hardware: %d", ret);
5902                 return ret;
5903         }
5904
5905         ret = hns3_enable_hw_error_intr(hns, true);
5906         if (ret) {
5907                 hns3_err(hw, "fail to enable hw error interrupts: %d",
5908                              ret);
5909                 return ret;
5910         }
5911         hns3_info(hw, "Reset done, driver initialization finished.");
5912
5913         return 0;
5914 }
5915
5916 static bool
5917 is_pf_reset_done(struct hns3_hw *hw)
5918 {
5919         uint32_t val, reg, reg_bit;
5920
5921         switch (hw->reset.level) {
5922         case HNS3_IMP_RESET:
5923                 reg = HNS3_GLOBAL_RESET_REG;
5924                 reg_bit = HNS3_IMP_RESET_BIT;
5925                 break;
5926         case HNS3_GLOBAL_RESET:
5927                 reg = HNS3_GLOBAL_RESET_REG;
5928                 reg_bit = HNS3_GLOBAL_RESET_BIT;
5929                 break;
5930         case HNS3_FUNC_RESET:
5931                 reg = HNS3_FUN_RST_ING;
5932                 reg_bit = HNS3_FUN_RST_ING_B;
5933                 break;
5934         case HNS3_FLR_RESET:
5935         default:
5936                 hns3_err(hw, "Wait for unsupported reset level: %d",
5937                          hw->reset.level);
5938                 return true;
5939         }
5940         val = hns3_read_dev(hw, reg);
5941         if (hns3_get_bit(val, reg_bit))
5942                 return false;
5943         else
5944                 return true;
5945 }
5946
5947 bool
5948 hns3_is_reset_pending(struct hns3_adapter *hns)
5949 {
5950         struct hns3_hw *hw = &hns->hw;
5951         enum hns3_reset_level reset;
5952
5953         hns3_check_event_cause(hns, NULL);
5954         reset = hns3_get_reset_level(hns, &hw->reset.pending);
5955         if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5956             hw->reset.level < reset) {
5957                 hns3_warn(hw, "High level reset %d is pending", reset);
5958                 return true;
5959         }
5960         reset = hns3_get_reset_level(hns, &hw->reset.request);
5961         if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5962             hw->reset.level < reset) {
5963                 hns3_warn(hw, "High level reset %d is request", reset);
5964                 return true;
5965         }
5966         return false;
5967 }
5968
5969 static int
5970 hns3_wait_hardware_ready(struct hns3_adapter *hns)
5971 {
5972         struct hns3_hw *hw = &hns->hw;
5973         struct hns3_wait_data *wait_data = hw->reset.wait_data;
5974         struct timeval tv;
5975
5976         if (wait_data->result == HNS3_WAIT_SUCCESS)
5977                 return 0;
5978         else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
5979                 hns3_clock_gettime(&tv);
5980                 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
5981                           tv.tv_sec, tv.tv_usec);
5982                 return -ETIME;
5983         } else if (wait_data->result == HNS3_WAIT_REQUEST)
5984                 return -EAGAIN;
5985
5986         wait_data->hns = hns;
5987         wait_data->check_completion = is_pf_reset_done;
5988         wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
5989                                 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
5990         wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
5991         wait_data->count = HNS3_RESET_WAIT_CNT;
5992         wait_data->result = HNS3_WAIT_REQUEST;
5993         rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
5994         return -EAGAIN;
5995 }
5996
5997 static int
5998 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
5999 {
6000         struct hns3_cmd_desc desc;
6001         struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
6002
6003         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
6004         hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
6005         req->fun_reset_vfid = func_id;
6006
6007         return hns3_cmd_send(hw, &desc, 1);
6008 }
6009
6010 static int
6011 hns3_imp_reset_cmd(struct hns3_hw *hw)
6012 {
6013         struct hns3_cmd_desc desc;
6014
6015         hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
6016         desc.data[0] = 0xeedd;
6017
6018         return hns3_cmd_send(hw, &desc, 1);
6019 }
6020
6021 static void
6022 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
6023 {
6024         struct hns3_hw *hw = &hns->hw;
6025         struct timeval tv;
6026         uint32_t val;
6027
6028         hns3_clock_gettime(&tv);
6029         if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
6030             hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
6031                 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
6032                           tv.tv_sec, tv.tv_usec);
6033                 return;
6034         }
6035
6036         switch (reset_level) {
6037         case HNS3_IMP_RESET:
6038                 hns3_imp_reset_cmd(hw);
6039                 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
6040                           tv.tv_sec, tv.tv_usec);
6041                 break;
6042         case HNS3_GLOBAL_RESET:
6043                 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
6044                 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
6045                 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
6046                 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
6047                           tv.tv_sec, tv.tv_usec);
6048                 break;
6049         case HNS3_FUNC_RESET:
6050                 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
6051                           tv.tv_sec, tv.tv_usec);
6052                 /* schedule again to check later */
6053                 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
6054                 hns3_schedule_reset(hns);
6055                 break;
6056         default:
6057                 hns3_warn(hw, "Unsupported reset level: %d", reset_level);
6058                 return;
6059         }
6060         hns3_atomic_clear_bit(reset_level, &hw->reset.request);
6061 }
6062
6063 static enum hns3_reset_level
6064 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
6065 {
6066         struct hns3_hw *hw = &hns->hw;
6067         enum hns3_reset_level reset_level = HNS3_NONE_RESET;
6068
6069         /* Return the highest priority reset level amongst all */
6070         if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
6071                 reset_level = HNS3_IMP_RESET;
6072         else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
6073                 reset_level = HNS3_GLOBAL_RESET;
6074         else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
6075                 reset_level = HNS3_FUNC_RESET;
6076         else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
6077                 reset_level = HNS3_FLR_RESET;
6078
6079         if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
6080                 return HNS3_NONE_RESET;
6081
6082         return reset_level;
6083 }
6084
6085 static void
6086 hns3_record_imp_error(struct hns3_adapter *hns)
6087 {
6088         struct hns3_hw *hw = &hns->hw;
6089         uint32_t reg_val;
6090
6091         reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
6092         if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
6093                 hns3_warn(hw, "Detected IMP RD poison!");
6094                 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
6095                 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
6096         }
6097
6098         if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
6099                 hns3_warn(hw, "Detected IMP CMDQ error!");
6100                 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
6101                 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
6102         }
6103 }
6104
6105 static int
6106 hns3_prepare_reset(struct hns3_adapter *hns)
6107 {
6108         struct hns3_hw *hw = &hns->hw;
6109         uint32_t reg_val;
6110         int ret;
6111
6112         switch (hw->reset.level) {
6113         case HNS3_FUNC_RESET:
6114                 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
6115                 if (ret)
6116                         return ret;
6117
6118                 /*
6119                  * After performaning pf reset, it is not necessary to do the
6120                  * mailbox handling or send any command to firmware, because
6121                  * any mailbox handling or command to firmware is only valid
6122                  * after hns3_cmd_init is called.
6123                  */
6124                 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
6125                 hw->reset.stats.request_cnt++;
6126                 break;
6127         case HNS3_IMP_RESET:
6128                 hns3_record_imp_error(hns);
6129                 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
6130                 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
6131                                BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
6132                 break;
6133         default:
6134                 break;
6135         }
6136         return 0;
6137 }
6138
6139 static int
6140 hns3_set_rst_done(struct hns3_hw *hw)
6141 {
6142         struct hns3_pf_rst_done_cmd *req;
6143         struct hns3_cmd_desc desc;
6144
6145         req = (struct hns3_pf_rst_done_cmd *)desc.data;
6146         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
6147         req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
6148         return hns3_cmd_send(hw, &desc, 1);
6149 }
6150
6151 static int
6152 hns3_stop_service(struct hns3_adapter *hns)
6153 {
6154         struct hns3_hw *hw = &hns->hw;
6155         struct rte_eth_dev *eth_dev;
6156
6157         eth_dev = &rte_eth_devices[hw->data->port_id];
6158         hw->mac.link_status = RTE_ETH_LINK_DOWN;
6159         if (hw->adapter_state == HNS3_NIC_STARTED) {
6160                 rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
6161                 hns3_update_linkstatus_and_event(hw, false);
6162         }
6163
6164         hns3_set_rxtx_function(eth_dev);
6165         rte_wmb();
6166         /* Disable datapath on secondary process. */
6167         hns3_mp_req_stop_rxtx(eth_dev);
6168         rte_delay_ms(hw->cfg_max_queues);
6169
6170         rte_spinlock_lock(&hw->lock);
6171         if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
6172             hw->adapter_state == HNS3_NIC_STOPPING) {
6173                 hns3_enable_all_queues(hw, false);
6174                 hns3_do_stop(hns);
6175                 hw->reset.mbuf_deferred_free = true;
6176         } else
6177                 hw->reset.mbuf_deferred_free = false;
6178
6179         /*
6180          * It is cumbersome for hardware to pick-and-choose entries for deletion
6181          * from table space. Hence, for function reset software intervention is
6182          * required to delete the entries
6183          */
6184         if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
6185                 hns3_configure_all_mc_mac_addr(hns, true);
6186         rte_spinlock_unlock(&hw->lock);
6187
6188         return 0;
6189 }
6190
6191 static int
6192 hns3_start_service(struct hns3_adapter *hns)
6193 {
6194         struct hns3_hw *hw = &hns->hw;
6195         struct rte_eth_dev *eth_dev;
6196
6197         if (hw->reset.level == HNS3_IMP_RESET ||
6198             hw->reset.level == HNS3_GLOBAL_RESET)
6199                 hns3_set_rst_done(hw);
6200         eth_dev = &rte_eth_devices[hw->data->port_id];
6201         hns3_set_rxtx_function(eth_dev);
6202         hns3_mp_req_start_rxtx(eth_dev);
6203         if (hw->adapter_state == HNS3_NIC_STARTED) {
6204                 /*
6205                  * This API parent function already hold the hns3_hw.lock, the
6206                  * hns3_service_handler may report lse, in bonding application
6207                  * it will call driver's ops which may acquire the hns3_hw.lock
6208                  * again, thus lead to deadlock.
6209                  * We defer calls hns3_service_handler to avoid the deadlock.
6210                  */
6211                 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
6212                                   hns3_service_handler, eth_dev);
6213
6214                 /* Enable interrupt of all rx queues before enabling queues */
6215                 hns3_dev_all_rx_queue_intr_enable(hw, true);
6216                 /*
6217                  * Enable state of each rxq and txq will be recovered after
6218                  * reset, so we need to restore them before enable all tqps;
6219                  */
6220                 hns3_restore_tqp_enable_state(hw);
6221                 /*
6222                  * When finished the initialization, enable queues to receive
6223                  * and transmit packets.
6224                  */
6225                 hns3_enable_all_queues(hw, true);
6226         }
6227
6228         return 0;
6229 }
6230
6231 static int
6232 hns3_restore_conf(struct hns3_adapter *hns)
6233 {
6234         struct hns3_hw *hw = &hns->hw;
6235         int ret;
6236
6237         ret = hns3_configure_all_mac_addr(hns, false);
6238         if (ret)
6239                 return ret;
6240
6241         ret = hns3_configure_all_mc_mac_addr(hns, false);
6242         if (ret)
6243                 goto err_mc_mac;
6244
6245         ret = hns3_dev_promisc_restore(hns);
6246         if (ret)
6247                 goto err_promisc;
6248
6249         ret = hns3_restore_vlan_table(hns);
6250         if (ret)
6251                 goto err_promisc;
6252
6253         ret = hns3_restore_vlan_conf(hns);
6254         if (ret)
6255                 goto err_promisc;
6256
6257         ret = hns3_restore_all_fdir_filter(hns);
6258         if (ret)
6259                 goto err_promisc;
6260
6261         ret = hns3_restore_ptp(hns);
6262         if (ret)
6263                 goto err_promisc;
6264
6265         ret = hns3_restore_rx_interrupt(hw);
6266         if (ret)
6267                 goto err_promisc;
6268
6269         ret = hns3_restore_gro_conf(hw);
6270         if (ret)
6271                 goto err_promisc;
6272
6273         ret = hns3_restore_fec(hw);
6274         if (ret)
6275                 goto err_promisc;
6276
6277         if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
6278                 ret = hns3_do_start(hns, false);
6279                 if (ret)
6280                         goto err_promisc;
6281                 hns3_info(hw, "hns3 dev restart successful!");
6282         } else if (hw->adapter_state == HNS3_NIC_STOPPING)
6283                 hw->adapter_state = HNS3_NIC_CONFIGURED;
6284         return 0;
6285
6286 err_promisc:
6287         hns3_configure_all_mc_mac_addr(hns, true);
6288 err_mc_mac:
6289         hns3_configure_all_mac_addr(hns, true);
6290         return ret;
6291 }
6292
6293 static void
6294 hns3_reset_service(void *param)
6295 {
6296         struct hns3_adapter *hns = (struct hns3_adapter *)param;
6297         struct hns3_hw *hw = &hns->hw;
6298         enum hns3_reset_level reset_level;
6299         struct timeval tv_delta;
6300         struct timeval tv_start;
6301         struct timeval tv;
6302         uint64_t msec;
6303         int ret;
6304
6305         /*
6306          * The interrupt is not triggered within the delay time.
6307          * The interrupt may have been lost. It is necessary to handle
6308          * the interrupt to recover from the error.
6309          */
6310         if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
6311                             SCHEDULE_DEFERRED) {
6312                 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
6313                                   __ATOMIC_RELAXED);
6314                 hns3_err(hw, "Handling interrupts in delayed tasks");
6315                 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
6316                 reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
6317                 if (reset_level == HNS3_NONE_RESET) {
6318                         hns3_err(hw, "No reset level is set, try IMP reset");
6319                         hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
6320                 }
6321         }
6322         __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
6323
6324         /*
6325          * Check if there is any ongoing reset in the hardware. This status can
6326          * be checked from reset_pending. If there is then, we need to wait for
6327          * hardware to complete reset.
6328          *    a. If we are able to figure out in reasonable time that hardware
6329          *       has fully resetted then, we can proceed with driver, client
6330          *       reset.
6331          *    b. else, we can come back later to check this status so re-sched
6332          *       now.
6333          */
6334         reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
6335         if (reset_level != HNS3_NONE_RESET) {
6336                 hns3_clock_gettime(&tv_start);
6337                 ret = hns3_reset_process(hns, reset_level);
6338                 hns3_clock_gettime(&tv);
6339                 timersub(&tv, &tv_start, &tv_delta);
6340                 msec = hns3_clock_calctime_ms(&tv_delta);
6341                 if (msec > HNS3_RESET_PROCESS_MS)
6342                         hns3_err(hw, "%d handle long time delta %" PRIu64
6343                                      " ms time=%ld.%.6ld",
6344                                  hw->reset.level, msec,
6345                                  tv.tv_sec, tv.tv_usec);
6346                 if (ret == -EAGAIN)
6347                         return;
6348         }
6349
6350         /* Check if we got any *new* reset requests to be honored */
6351         reset_level = hns3_get_reset_level(hns, &hw->reset.request);
6352         if (reset_level != HNS3_NONE_RESET)
6353                 hns3_msix_process(hns, reset_level);
6354 }
6355
6356 static unsigned int
6357 hns3_get_speed_capa_num(uint16_t device_id)
6358 {
6359         unsigned int num;
6360
6361         switch (device_id) {
6362         case HNS3_DEV_ID_25GE:
6363         case HNS3_DEV_ID_25GE_RDMA:
6364                 num = 2;
6365                 break;
6366         case HNS3_DEV_ID_100G_RDMA_MACSEC:
6367         case HNS3_DEV_ID_200G_RDMA:
6368                 num = 1;
6369                 break;
6370         default:
6371                 num = 0;
6372                 break;
6373         }
6374
6375         return num;
6376 }
6377
6378 static int
6379 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
6380                         uint16_t device_id)
6381 {
6382         switch (device_id) {
6383         case HNS3_DEV_ID_25GE:
6384         /* fallthrough */
6385         case HNS3_DEV_ID_25GE_RDMA:
6386                 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
6387                 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
6388
6389                 /* In HNS3 device, the 25G NIC is compatible with 10G rate */
6390                 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
6391                 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
6392                 break;
6393         case HNS3_DEV_ID_100G_RDMA_MACSEC:
6394                 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
6395                 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
6396                 break;
6397         case HNS3_DEV_ID_200G_RDMA:
6398                 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
6399                 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
6400                 break;
6401         default:
6402                 return -ENOTSUP;
6403         }
6404
6405         return 0;
6406 }
6407
6408 static int
6409 hns3_fec_get_capability(struct rte_eth_dev *dev,
6410                         struct rte_eth_fec_capa *speed_fec_capa,
6411                         unsigned int num)
6412 {
6413         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6414         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6415         uint16_t device_id = pci_dev->id.device_id;
6416         unsigned int capa_num;
6417         int ret;
6418
6419         capa_num = hns3_get_speed_capa_num(device_id);
6420         if (capa_num == 0) {
6421                 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
6422                          device_id);
6423                 return -ENOTSUP;
6424         }
6425
6426         if (speed_fec_capa == NULL || num < capa_num)
6427                 return capa_num;
6428
6429         ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
6430         if (ret)
6431                 return -ENOTSUP;
6432
6433         return capa_num;
6434 }
6435
6436 static int
6437 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
6438 {
6439         struct hns3_config_fec_cmd *req;
6440         struct hns3_cmd_desc desc;
6441         int ret;
6442
6443         /*
6444          * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported
6445          * in device of link speed
6446          * below 10 Gbps.
6447          */
6448         if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
6449                 *state = 0;
6450                 return 0;
6451         }
6452
6453         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
6454         req = (struct hns3_config_fec_cmd *)desc.data;
6455         ret = hns3_cmd_send(hw, &desc, 1);
6456         if (ret) {
6457                 hns3_err(hw, "get current fec auto state failed, ret = %d",
6458                          ret);
6459                 return ret;
6460         }
6461
6462         *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
6463         return 0;
6464 }
6465
6466 static int
6467 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
6468 {
6469         struct hns3_sfp_info_cmd *resp;
6470         uint32_t tmp_fec_capa;
6471         uint8_t auto_state;
6472         struct hns3_cmd_desc desc;
6473         int ret;
6474
6475         /*
6476          * If link is down and AUTO is enabled, AUTO is returned, otherwise,
6477          * configured FEC mode is returned.
6478          * If link is up, current FEC mode is returned.
6479          */
6480         if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
6481                 ret = get_current_fec_auto_state(hw, &auto_state);
6482                 if (ret)
6483                         return ret;
6484
6485                 if (auto_state == 0x1) {
6486                         *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
6487                         return 0;
6488                 }
6489         }
6490
6491         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
6492         resp = (struct hns3_sfp_info_cmd *)desc.data;
6493         resp->query_type = HNS3_ACTIVE_QUERY;
6494
6495         ret = hns3_cmd_send(hw, &desc, 1);
6496         if (ret == -EOPNOTSUPP) {
6497                 hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
6498                 return ret;
6499         } else if (ret) {
6500                 hns3_err(hw, "get FEC failed, ret = %d", ret);
6501                 return ret;
6502         }
6503
6504         /*
6505          * FEC mode order defined in hns3 hardware is inconsistend with
6506          * that defined in the ethdev library. So the sequence needs
6507          * to be converted.
6508          */
6509         switch (resp->active_fec) {
6510         case HNS3_HW_FEC_MODE_NOFEC:
6511                 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6512                 break;
6513         case HNS3_HW_FEC_MODE_BASER:
6514                 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
6515                 break;
6516         case HNS3_HW_FEC_MODE_RS:
6517                 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
6518                 break;
6519         default:
6520                 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6521                 break;
6522         }
6523
6524         *fec_capa = tmp_fec_capa;
6525         return 0;
6526 }
6527
6528 static int
6529 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
6530 {
6531         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6532
6533         return hns3_fec_get_internal(hw, fec_capa);
6534 }
6535
6536 static int
6537 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
6538 {
6539         struct hns3_config_fec_cmd *req;
6540         struct hns3_cmd_desc desc;
6541         int ret;
6542
6543         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
6544
6545         req = (struct hns3_config_fec_cmd *)desc.data;
6546         switch (mode) {
6547         case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
6548                 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6549                                 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
6550                 break;
6551         case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
6552                 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6553                                 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
6554                 break;
6555         case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
6556                 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6557                                 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
6558                 break;
6559         case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
6560                 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
6561                 break;
6562         default:
6563                 return 0;
6564         }
6565         ret = hns3_cmd_send(hw, &desc, 1);
6566         if (ret)
6567                 hns3_err(hw, "set fec mode failed, ret = %d", ret);
6568
6569         return ret;
6570 }
6571
6572 static uint32_t
6573 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
6574 {
6575         struct hns3_mac *mac = &hw->mac;
6576         uint32_t cur_capa;
6577
6578         switch (mac->link_speed) {
6579         case RTE_ETH_SPEED_NUM_10G:
6580                 cur_capa = fec_capa[1].capa;
6581                 break;
6582         case RTE_ETH_SPEED_NUM_25G:
6583         case RTE_ETH_SPEED_NUM_100G:
6584         case RTE_ETH_SPEED_NUM_200G:
6585                 cur_capa = fec_capa[0].capa;
6586                 break;
6587         default:
6588                 cur_capa = 0;
6589                 break;
6590         }
6591
6592         return cur_capa;
6593 }
6594
6595 static bool
6596 is_fec_mode_one_bit_set(uint32_t mode)
6597 {
6598         int cnt = 0;
6599         uint8_t i;
6600
6601         for (i = 0; i < sizeof(mode); i++)
6602                 if (mode >> i & 0x1)
6603                         cnt++;
6604
6605         return cnt == 1 ? true : false;
6606 }
6607
6608 static int
6609 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
6610 {
6611 #define FEC_CAPA_NUM 2
6612         struct hns3_adapter *hns = dev->data->dev_private;
6613         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6614         struct hns3_pf *pf = &hns->pf;
6615
6616         struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
6617         uint32_t cur_capa;
6618         uint32_t num = FEC_CAPA_NUM;
6619         int ret;
6620
6621         ret = hns3_fec_get_capability(dev, fec_capa, num);
6622         if (ret < 0)
6623                 return ret;
6624
6625         /* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */
6626         if (!is_fec_mode_one_bit_set(mode)) {
6627                 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
6628                              "FEC mode should be only one bit set", mode);
6629                 return -EINVAL;
6630         }
6631
6632         /*
6633          * Check whether the configured mode is within the FEC capability.
6634          * If not, the configured mode will not be supported.
6635          */
6636         cur_capa = get_current_speed_fec_cap(hw, fec_capa);
6637         if (!(cur_capa & mode)) {
6638                 hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
6639                 return -EINVAL;
6640         }
6641
6642         rte_spinlock_lock(&hw->lock);
6643         ret = hns3_set_fec_hw(hw, mode);
6644         if (ret) {
6645                 rte_spinlock_unlock(&hw->lock);
6646                 return ret;
6647         }
6648
6649         pf->fec_mode = mode;
6650         rte_spinlock_unlock(&hw->lock);
6651
6652         return 0;
6653 }
6654
6655 static int
6656 hns3_restore_fec(struct hns3_hw *hw)
6657 {
6658         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6659         struct hns3_pf *pf = &hns->pf;
6660         uint32_t mode = pf->fec_mode;
6661         int ret;
6662
6663         ret = hns3_set_fec_hw(hw, mode);
6664         if (ret)
6665                 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
6666                          mode, ret);
6667
6668         return ret;
6669 }
6670
6671 static int
6672 hns3_query_dev_fec_info(struct hns3_hw *hw)
6673 {
6674         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6675         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
6676         int ret;
6677
6678         ret = hns3_fec_get_internal(hw, &pf->fec_mode);
6679         if (ret)
6680                 hns3_err(hw, "query device FEC info failed, ret = %d", ret);
6681
6682         return ret;
6683 }
6684
6685 static bool
6686 hns3_optical_module_existed(struct hns3_hw *hw)
6687 {
6688         struct hns3_cmd_desc desc;
6689         bool existed;
6690         int ret;
6691
6692         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
6693         ret = hns3_cmd_send(hw, &desc, 1);
6694         if (ret) {
6695                 hns3_err(hw,
6696                          "fail to get optical module exist state, ret = %d.\n",
6697                          ret);
6698                 return false;
6699         }
6700         existed = !!desc.data[0];
6701
6702         return existed;
6703 }
6704
6705 static int
6706 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
6707                                 uint32_t len, uint8_t *data)
6708 {
6709 #define HNS3_SFP_INFO_CMD_NUM 6
6710 #define HNS3_SFP_INFO_MAX_LEN \
6711         (HNS3_SFP_INFO_BD0_LEN + \
6712         (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
6713         struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
6714         struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
6715         uint16_t read_len;
6716         uint16_t copy_len;
6717         int ret;
6718         int i;
6719
6720         for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
6721                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
6722                                           true);
6723                 if (i < HNS3_SFP_INFO_CMD_NUM - 1)
6724                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
6725         }
6726
6727         sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
6728         sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
6729         read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
6730         sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
6731
6732         ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
6733         if (ret) {
6734                 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
6735                                 ret);
6736                 return ret;
6737         }
6738
6739         /* The data format in BD0 is different with the others. */
6740         copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
6741         memcpy(data, sfp_info_bd0->data, copy_len);
6742         read_len = copy_len;
6743
6744         for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
6745                 if (read_len >= len)
6746                         break;
6747
6748                 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
6749                 memcpy(data + read_len, desc[i].data, copy_len);
6750                 read_len += copy_len;
6751         }
6752
6753         return (int)read_len;
6754 }
6755
6756 static int
6757 hns3_get_module_eeprom(struct rte_eth_dev *dev,
6758                        struct rte_dev_eeprom_info *info)
6759 {
6760         struct hns3_adapter *hns = dev->data->dev_private;
6761         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6762         uint32_t offset = info->offset;
6763         uint32_t len = info->length;
6764         uint8_t *data = info->data;
6765         uint32_t read_len = 0;
6766
6767         if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
6768                 return -ENOTSUP;
6769
6770         if (!hns3_optical_module_existed(hw)) {
6771                 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n");
6772                 return -EIO;
6773         }
6774
6775         while (read_len < len) {
6776                 int ret;
6777                 ret = hns3_get_module_eeprom_data(hw, offset + read_len,
6778                                                   len - read_len,
6779                                                   data + read_len);
6780                 if (ret < 0)
6781                         return -EIO;
6782                 read_len += ret;
6783         }
6784
6785         return 0;
6786 }
6787
6788 static int
6789 hns3_get_module_info(struct rte_eth_dev *dev,
6790                      struct rte_eth_dev_module_info *modinfo)
6791 {
6792 #define HNS3_SFF8024_ID_SFP             0x03
6793 #define HNS3_SFF8024_ID_QSFP_8438       0x0c
6794 #define HNS3_SFF8024_ID_QSFP_8436_8636  0x0d
6795 #define HNS3_SFF8024_ID_QSFP28_8636     0x11
6796 #define HNS3_SFF_8636_V1_3              0x03
6797         struct hns3_adapter *hns = dev->data->dev_private;
6798         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6799         struct rte_dev_eeprom_info info;
6800         struct hns3_sfp_type sfp_type;
6801         int ret;
6802
6803         memset(&sfp_type, 0, sizeof(sfp_type));
6804         memset(&info, 0, sizeof(info));
6805         info.data = (uint8_t *)&sfp_type;
6806         info.length = sizeof(sfp_type);
6807         ret = hns3_get_module_eeprom(dev, &info);
6808         if (ret)
6809                 return ret;
6810
6811         switch (sfp_type.type) {
6812         case HNS3_SFF8024_ID_SFP:
6813                 modinfo->type = RTE_ETH_MODULE_SFF_8472;
6814                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
6815                 break;
6816         case HNS3_SFF8024_ID_QSFP_8438:
6817                 modinfo->type = RTE_ETH_MODULE_SFF_8436;
6818                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
6819                 break;
6820         case HNS3_SFF8024_ID_QSFP_8436_8636:
6821                 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
6822                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
6823                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
6824                 } else {
6825                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
6826                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
6827                 }
6828                 break;
6829         case HNS3_SFF8024_ID_QSFP28_8636:
6830                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
6831                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
6832                 break;
6833         default:
6834                 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n",
6835                          sfp_type.type, sfp_type.ext_type);
6836                 return -EINVAL;
6837         }
6838
6839         return 0;
6840 }
6841
6842 static const struct eth_dev_ops hns3_eth_dev_ops = {
6843         .dev_configure      = hns3_dev_configure,
6844         .dev_start          = hns3_dev_start,
6845         .dev_stop           = hns3_dev_stop,
6846         .dev_close          = hns3_dev_close,
6847         .promiscuous_enable = hns3_dev_promiscuous_enable,
6848         .promiscuous_disable = hns3_dev_promiscuous_disable,
6849         .allmulticast_enable  = hns3_dev_allmulticast_enable,
6850         .allmulticast_disable = hns3_dev_allmulticast_disable,
6851         .mtu_set            = hns3_dev_mtu_set,
6852         .stats_get          = hns3_stats_get,
6853         .stats_reset        = hns3_stats_reset,
6854         .xstats_get         = hns3_dev_xstats_get,
6855         .xstats_get_names   = hns3_dev_xstats_get_names,
6856         .xstats_reset       = hns3_dev_xstats_reset,
6857         .xstats_get_by_id   = hns3_dev_xstats_get_by_id,
6858         .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
6859         .dev_infos_get          = hns3_dev_infos_get,
6860         .fw_version_get         = hns3_fw_version_get,
6861         .rx_queue_setup         = hns3_rx_queue_setup,
6862         .tx_queue_setup         = hns3_tx_queue_setup,
6863         .rx_queue_release       = hns3_dev_rx_queue_release,
6864         .tx_queue_release       = hns3_dev_tx_queue_release,
6865         .rx_queue_start         = hns3_dev_rx_queue_start,
6866         .rx_queue_stop          = hns3_dev_rx_queue_stop,
6867         .tx_queue_start         = hns3_dev_tx_queue_start,
6868         .tx_queue_stop          = hns3_dev_tx_queue_stop,
6869         .rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
6870         .rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
6871         .rxq_info_get           = hns3_rxq_info_get,
6872         .txq_info_get           = hns3_txq_info_get,
6873         .rx_burst_mode_get      = hns3_rx_burst_mode_get,
6874         .tx_burst_mode_get      = hns3_tx_burst_mode_get,
6875         .flow_ctrl_get          = hns3_flow_ctrl_get,
6876         .flow_ctrl_set          = hns3_flow_ctrl_set,
6877         .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
6878         .mac_addr_add           = hns3_add_mac_addr,
6879         .mac_addr_remove        = hns3_remove_mac_addr,
6880         .mac_addr_set           = hns3_set_default_mac_addr,
6881         .set_mc_addr_list       = hns3_set_mc_mac_addr_list,
6882         .link_update            = hns3_dev_link_update,
6883         .dev_set_link_up        = hns3_dev_set_link_up,
6884         .dev_set_link_down      = hns3_dev_set_link_down,
6885         .rss_hash_update        = hns3_dev_rss_hash_update,
6886         .rss_hash_conf_get      = hns3_dev_rss_hash_conf_get,
6887         .reta_update            = hns3_dev_rss_reta_update,
6888         .reta_query             = hns3_dev_rss_reta_query,
6889         .flow_ops_get           = hns3_dev_flow_ops_get,
6890         .vlan_filter_set        = hns3_vlan_filter_set,
6891         .vlan_tpid_set          = hns3_vlan_tpid_set,
6892         .vlan_offload_set       = hns3_vlan_offload_set,
6893         .vlan_pvid_set          = hns3_vlan_pvid_set,
6894         .get_reg                = hns3_get_regs,
6895         .get_module_info        = hns3_get_module_info,
6896         .get_module_eeprom      = hns3_get_module_eeprom,
6897         .get_dcb_info           = hns3_get_dcb_info,
6898         .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
6899         .fec_get_capability     = hns3_fec_get_capability,
6900         .fec_get                = hns3_fec_get,
6901         .fec_set                = hns3_fec_set,
6902         .tm_ops_get             = hns3_tm_ops_get,
6903         .tx_done_cleanup        = hns3_tx_done_cleanup,
6904         .timesync_enable            = hns3_timesync_enable,
6905         .timesync_disable           = hns3_timesync_disable,
6906         .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
6907         .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
6908         .timesync_adjust_time       = hns3_timesync_adjust_time,
6909         .timesync_read_time         = hns3_timesync_read_time,
6910         .timesync_write_time        = hns3_timesync_write_time,
6911 };
6912
6913 static const struct hns3_reset_ops hns3_reset_ops = {
6914         .reset_service       = hns3_reset_service,
6915         .stop_service        = hns3_stop_service,
6916         .prepare_reset       = hns3_prepare_reset,
6917         .wait_hardware_ready = hns3_wait_hardware_ready,
6918         .reinit_dev          = hns3_reinit_dev,
6919         .restore_conf        = hns3_restore_conf,
6920         .start_service       = hns3_start_service,
6921 };
6922
6923 static void
6924 hns3_init_hw_ops(struct hns3_hw *hw)
6925 {
6926         hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
6927         hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
6928         hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
6929         hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
6930 }
6931
6932 static int
6933 hns3_dev_init(struct rte_eth_dev *eth_dev)
6934 {
6935         struct hns3_adapter *hns = eth_dev->data->dev_private;
6936         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
6937         struct rte_ether_addr *eth_addr;
6938         struct hns3_hw *hw = &hns->hw;
6939         int ret;
6940
6941         PMD_INIT_FUNC_TRACE();
6942
6943         hns3_flow_init(eth_dev);
6944
6945         hns3_set_rxtx_function(eth_dev);
6946         eth_dev->dev_ops = &hns3_eth_dev_ops;
6947         eth_dev->rx_queue_count = hns3_rx_queue_count;
6948         ret = hns3_mp_init(eth_dev);
6949         if (ret)
6950                 goto err_mp_init;
6951
6952         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
6953                 hns3_tx_push_init(eth_dev);
6954                 return 0;
6955         }
6956
6957         hw->adapter_state = HNS3_NIC_UNINITIALIZED;
6958         hns->is_vf = false;
6959         hw->data = eth_dev->data;
6960         hns3_parse_devargs(eth_dev);
6961
6962         /*
6963          * Set default max packet size according to the mtu
6964          * default vale in DPDK frame.
6965          */
6966         hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
6967
6968         ret = hns3_reset_init(hw);
6969         if (ret)
6970                 goto err_init_reset;
6971         hw->reset.ops = &hns3_reset_ops;
6972
6973         hns3_init_hw_ops(hw);
6974         ret = hns3_init_pf(eth_dev);
6975         if (ret) {
6976                 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
6977                 goto err_init_pf;
6978         }
6979
6980         /* Allocate memory for storing MAC addresses */
6981         eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
6982                                                sizeof(struct rte_ether_addr) *
6983                                                HNS3_UC_MACADDR_NUM, 0);
6984         if (eth_dev->data->mac_addrs == NULL) {
6985                 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
6986                              "to store MAC addresses",
6987                              sizeof(struct rte_ether_addr) *
6988                              HNS3_UC_MACADDR_NUM);
6989                 ret = -ENOMEM;
6990                 goto err_rte_zmalloc;
6991         }
6992
6993         eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
6994         if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
6995                 rte_eth_random_addr(hw->mac.mac_addr);
6996                 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
6997                                 (struct rte_ether_addr *)hw->mac.mac_addr);
6998                 hns3_warn(hw, "default mac_addr from firmware is an invalid "
6999                           "unicast address, using random MAC address %s",
7000                           mac_str);
7001         }
7002         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
7003                             &eth_dev->data->mac_addrs[0]);
7004
7005         hw->adapter_state = HNS3_NIC_INITIALIZED;
7006
7007         if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
7008                             SCHEDULE_PENDING) {
7009                 hns3_err(hw, "Reschedule reset service after dev_init");
7010                 hns3_schedule_reset(hns);
7011         } else {
7012                 /* IMP will wait ready flag before reset */
7013                 hns3_notify_reset_ready(hw, false);
7014         }
7015
7016         hns3_info(hw, "hns3 dev initialization successful!");
7017         return 0;
7018
7019 err_rte_zmalloc:
7020         hns3_uninit_pf(eth_dev);
7021
7022 err_init_pf:
7023         rte_free(hw->reset.wait_data);
7024
7025 err_init_reset:
7026         hns3_mp_uninit(eth_dev);
7027
7028 err_mp_init:
7029         eth_dev->dev_ops = NULL;
7030         eth_dev->rx_pkt_burst = NULL;
7031         eth_dev->rx_descriptor_status = NULL;
7032         eth_dev->tx_pkt_burst = NULL;
7033         eth_dev->tx_pkt_prepare = NULL;
7034         eth_dev->tx_descriptor_status = NULL;
7035         return ret;
7036 }
7037
7038 static int
7039 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
7040 {
7041         struct hns3_adapter *hns = eth_dev->data->dev_private;
7042         struct hns3_hw *hw = &hns->hw;
7043
7044         PMD_INIT_FUNC_TRACE();
7045
7046         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
7047                 hns3_mp_uninit(eth_dev);
7048                 return 0;
7049         }
7050
7051         if (hw->adapter_state < HNS3_NIC_CLOSING)
7052                 hns3_dev_close(eth_dev);
7053
7054         hw->adapter_state = HNS3_NIC_REMOVED;
7055         return 0;
7056 }
7057
7058 static int
7059 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
7060                    struct rte_pci_device *pci_dev)
7061 {
7062         return rte_eth_dev_pci_generic_probe(pci_dev,
7063                                              sizeof(struct hns3_adapter),
7064                                              hns3_dev_init);
7065 }
7066
7067 static int
7068 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
7069 {
7070         return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
7071 }
7072
7073 static const struct rte_pci_id pci_id_hns3_map[] = {
7074         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
7075         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
7076         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
7077         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
7078         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
7079         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
7080         { .vendor_id = 0, }, /* sentinel */
7081 };
7082
7083 static struct rte_pci_driver rte_hns3_pmd = {
7084         .id_table = pci_id_hns3_map,
7085         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
7086         .probe = eth_hns3_pci_probe,
7087         .remove = eth_hns3_pci_remove,
7088 };
7089
7090 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
7091 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
7092 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
7093 RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
7094                 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
7095                 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
7096                 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
7097                 HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
7098 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
7099 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);