net/hns3: add start/stop and configure operations
drivers/net/hns3/hns3_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <errno.h>
6 #include <stdarg.h>
7 #include <stdbool.h>
8 #include <stdio.h>
9 #include <stdint.h>
10 #include <inttypes.h>
11 #include <unistd.h>
12 #include <rte_bus_pci.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
15 #include <rte_dev.h>
16 #include <rte_eal.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_io.h>
21 #include <rte_log.h>
22 #include <rte_pci.h>
23
24 #include "hns3_ethdev.h"
25 #include "hns3_logs.h"
26 #include "hns3_rxtx.h"
27 #include "hns3_regs.h"
28 #include "hns3_dcb.h"
29
30 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE       32
31 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM       1
32
33 #define HNS3_SERVICE_INTERVAL           1000000 /* us */
34 #define HNS3_PORT_BASE_VLAN_DISABLE     0
35 #define HNS3_PORT_BASE_VLAN_ENABLE      1
36 #define HNS3_INVLID_PVID                0xFFFF
37
38 #define HNS3_FILTER_TYPE_VF             0
39 #define HNS3_FILTER_TYPE_PORT           1
40 #define HNS3_FILTER_FE_EGRESS_V1_B      BIT(0)
41 #define HNS3_FILTER_FE_NIC_INGRESS_B    BIT(0)
42 #define HNS3_FILTER_FE_NIC_EGRESS_B     BIT(1)
43 #define HNS3_FILTER_FE_ROCE_INGRESS_B   BIT(2)
44 #define HNS3_FILTER_FE_ROCE_EGRESS_B    BIT(3)
45 #define HNS3_FILTER_FE_EGRESS           (HNS3_FILTER_FE_NIC_EGRESS_B \
46                                         | HNS3_FILTER_FE_ROCE_EGRESS_B)
47 #define HNS3_FILTER_FE_INGRESS          (HNS3_FILTER_FE_NIC_INGRESS_B \
48                                         | HNS3_FILTER_FE_ROCE_INGRESS_B)
49
50 int hns3_logtype_init;
51 int hns3_logtype_driver;
52
53 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
55                                     int on);
56
57 static int
58 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
59 {
60 #define HNS3_VLAN_OFFSET_160            160
61         struct hns3_vlan_filter_pf_cfg_cmd *req;
62         struct hns3_hw *hw = &hns->hw;
63         uint8_t vlan_offset_byte_val;
64         struct hns3_cmd_desc desc;
65         uint8_t vlan_offset_byte;
66         uint8_t vlan_offset_160;
67         int ret;
68
69         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
70
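        /*
         * The filter table is addressed in blocks of 160 vlan ids per command:
         * vlan_offset selects the 160-id block and vlan_offset_bitmap[] holds
         * one bit per vlan id inside that block. vlan_cfg = 0 appears to add
         * the selected id to the filter and 1 to remove it (inferred from the
         * on-to-0 mapping below, not from a documented register layout).
         */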
71         vlan_offset_160 = vlan_id / HNS3_VLAN_OFFSET_160;
72         vlan_offset_byte = (vlan_id % HNS3_VLAN_OFFSET_160) / 8;
73         vlan_offset_byte_val = 1 << (vlan_id % 8);
74
75         req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
76         req->vlan_offset = vlan_offset_160;
77         req->vlan_cfg = on ? 0 : 1;
78         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
79
80         ret = hns3_cmd_send(hw, &desc, 1);
81         if (ret)
82                 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
83                          vlan_id, ret);
84
85         return ret;
86 }
87
88 static void
89 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
90 {
91         struct hns3_user_vlan_table *vlan_entry;
92         struct hns3_pf *pf = &hns->pf;
93
94         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
95                 if (vlan_entry->vlan_id == vlan_id) {
96                         if (vlan_entry->hd_tbl_status)
97                                 hns3_set_port_vlan_filter(hns, vlan_id, 0);
98                         LIST_REMOVE(vlan_entry, next);
99                         rte_free(vlan_entry);
100                         break;
101                 }
102         }
103 }
104
105 static void
106 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
107                         bool writen_to_tbl)
108 {
109         struct hns3_user_vlan_table *vlan_entry;
110         struct hns3_hw *hw = &hns->hw;
111         struct hns3_pf *pf = &hns->pf;
112
113         vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
114         if (vlan_entry == NULL) {
115                 hns3_err(hw, "Failed to malloc hns3 vlan table");
116                 return;
117         }
118
119         vlan_entry->hd_tbl_status = writen_to_tbl;
120         vlan_entry->vlan_id = vlan_id;
121
122         LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
123 }
124
125 static int
126 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
127 {
128         struct hns3_pf *pf = &hns->pf;
129         bool writen_to_tbl = false;
130         int ret = 0;
131
132         /*
133          * When the vlan filter is enabled, hardware regards vlan id 0 as the
134          * entry for normal (untagged) packets, so deleting vlan id 0 is not allowed.
135          */
136         if (on == 0 && vlan_id == 0)
137                 return 0;
138
139         /*
140          * When port based vlan is enabled, it is used as the vlan filter
141          * condition. In this case, adding or removing a user vlan does not
142          * update the vlan filter table, only the vlan list. The vlan ids in
143          * the vlan list are written to the vlan filter table once port based
144          * vlan is disabled.
145          */
146         if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
147                 ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
148                 writen_to_tbl = true;
149         }
150
151         if (ret == 0 && vlan_id) {
152                 if (on)
153                         hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
154                 else
155                         hns3_rm_dev_vlan_table(hns, vlan_id);
156         }
157         return ret;
158 }
159
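/*
 * Per-vlan filter entry point. Presumably wired up later in this file as the
 * .vlan_filter_set dev op, so applications reach it through the generic
 * ethdev API, e.g. rte_eth_dev_vlan_filter(port_id, vlan_id, 1). hw->lock
 * serializes it against the other control-path operations in this driver.
 */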
160 static int
161 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
162 {
163         struct hns3_adapter *hns = dev->data->dev_private;
164         struct hns3_hw *hw = &hns->hw;
165         int ret;
166
167         rte_spinlock_lock(&hw->lock);
168         ret = hns3_vlan_filter_configure(hns, vlan_id, on);
169         rte_spinlock_unlock(&hw->lock);
170         return ret;
171 }
172
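/*
 * Only the standard 802.1Q TPID (RTE_ETHER_TYPE_VLAN, 0x8100) is accepted.
 * Two firmware commands are issued: one programs the Rx vlan type registers
 * (outer and, for the inner case, inner types as well), the other programs
 * the Tx insert types.
 */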
173 static int
174 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
175                          uint16_t tpid)
176 {
177         struct hns3_rx_vlan_type_cfg_cmd *rx_req;
178         struct hns3_tx_vlan_type_cfg_cmd *tx_req;
179         struct hns3_hw *hw = &hns->hw;
180         struct hns3_cmd_desc desc;
181         int ret;
182
183         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
184              vlan_type != ETH_VLAN_TYPE_OUTER)) {
185                 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
186                 return -EINVAL;
187         }
188
189         if (tpid != RTE_ETHER_TYPE_VLAN) {
190                 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
191                 return -EINVAL;
192         }
193
194         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
195         rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
196
197         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
198                 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
199                 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
200         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
201                 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
202                 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
203                 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
204                 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
205         }
206
207         ret = hns3_cmd_send(hw, &desc, 1);
208         if (ret) {
209                 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
210                          ret);
211                 return ret;
212         }
213
214         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
215
216         tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
217         tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
218         tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
219
220         ret = hns3_cmd_send(hw, &desc, 1);
221         if (ret)
222                 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
223                          ret);
224         return ret;
225 }
226
227 static int
228 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
229                    uint16_t tpid)
230 {
231         struct hns3_adapter *hns = dev->data->dev_private;
232         struct hns3_hw *hw = &hns->hw;
233         int ret;
234
235         rte_spinlock_lock(&hw->lock);
236         ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
237         rte_spinlock_unlock(&hw->lock);
238         return ret;
239 }
240
241 static int
242 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
243                              struct hns3_rx_vtag_cfg *vcfg)
244 {
245         struct hns3_vport_vtag_rx_cfg_cmd *req;
246         struct hns3_hw *hw = &hns->hw;
247         struct hns3_cmd_desc desc;
248         uint16_t vport_id;
249         uint8_t bitmap;
250         int ret;
251
252         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
253
254         req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
255         hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
256                      vcfg->strip_tag1_en ? 1 : 0);
257         hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
258                      vcfg->strip_tag2_en ? 1 : 0);
259         hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
260                      vcfg->vlan1_vlan_prionly ? 1 : 0);
261         hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
262                      vcfg->vlan2_vlan_prionly ? 1 : 0);
263
264         /*
265          * In the current version, VF is not supported when the PF is driven
266          * by the DPDK driver; the PF-related vf_id is 0, so only vport_id 0
267          * needs to be configured.
268          */
269         vport_id = 0;
270         req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
271         bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
272         req->vf_bitmap[req->vf_offset] = bitmap;
273
274         ret = hns3_cmd_send(hw, &desc, 1);
275         if (ret)
276                 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
277         return ret;
278 }
279
280 static void
281 hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
282                            struct hns3_rx_vtag_cfg *vcfg)
283 {
284         struct hns3_pf *pf = &hns->pf;
285         memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
286 }
287
288 static void
289 hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
290                            struct hns3_tx_vtag_cfg *vcfg)
291 {
292         struct hns3_pf *pf = &hns->pf;
293         memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
294 }
295
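/*
 * Which tag the hardware strips depends on whether a port based vlan (PVID)
 * is configured: with PVID disabled only tag2 stripping follows the user
 * setting, while with PVID enabled tag1 follows the user setting and tag2 is
 * always stripped (the PVID is assumed to occupy the other tag position).
 */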
296 static int
297 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
298 {
299         struct hns3_rx_vtag_cfg rxvlan_cfg;
300         struct hns3_pf *pf = &hns->pf;
301         struct hns3_hw *hw = &hns->hw;
302         int ret;
303
304         if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
305                 rxvlan_cfg.strip_tag1_en = false;
306                 rxvlan_cfg.strip_tag2_en = enable;
307         } else {
308                 rxvlan_cfg.strip_tag1_en = enable;
309                 rxvlan_cfg.strip_tag2_en = true;
310         }
311
312         rxvlan_cfg.vlan1_vlan_prionly = false;
313         rxvlan_cfg.vlan2_vlan_prionly = false;
314         rxvlan_cfg.rx_vlan_offload_en = enable;
315
316         ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
317         if (ret) {
318                 hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
319                 return ret;
320         }
321
322         hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
323
324         return ret;
325 }
326
327 static int
328 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
329                           uint8_t fe_type, bool filter_en, uint8_t vf_id)
330 {
331         struct hns3_vlan_filter_ctrl_cmd *req;
332         struct hns3_cmd_desc desc;
333         int ret;
334
335         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
336
337         req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
338         req->vlan_type = vlan_type;
339         req->vlan_fe = filter_en ? fe_type : 0;
340         req->vf_id = vf_id;
341
342         ret = hns3_cmd_send(hw, &desc, 1);
343         if (ret)
344                 hns3_err(hw, "set vlan filter fail, ret =%d", ret);
345
346         return ret;
347 }
348
349 static int
350 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
351 {
352         struct hns3_hw *hw = &hns->hw;
353         int ret;
354
355         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
356                                         HNS3_FILTER_FE_EGRESS, false, 0);
357         if (ret) {
358                 hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
359                 return ret;
360         }
361
362         ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
363                                         HNS3_FILTER_FE_INGRESS, enable, 0);
364         if (ret)
365                 hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
366
367         return ret;
368 }
369
370 static int
371 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
372 {
373         struct hns3_adapter *hns = dev->data->dev_private;
374         struct hns3_hw *hw = &hns->hw;
375         struct rte_eth_rxmode *rxmode;
376         unsigned int tmp_mask;
377         bool enable;
378         int ret = 0;
379
380         rte_spinlock_lock(&hw->lock);
381         rxmode = &dev->data->dev_conf.rxmode;
382         tmp_mask = (unsigned int)mask;
383         if (tmp_mask & ETH_VLAN_STRIP_MASK) {
384                 /* Enable or disable VLAN stripping */
385                 enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
386                     true : false;
387
388                 ret = hns3_en_hw_strip_rxvtag(hns, enable);
389                 if (ret) {
390                         rte_spinlock_unlock(&hw->lock);
391                         hns3_err(hw, "failed to enable rx strip, ret =%d", ret);
392                         return ret;
393                 }
394         }
395
396         rte_spinlock_unlock(&hw->lock);
397
398         return ret;
399 }
400
401 static int
402 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
403                              struct hns3_tx_vtag_cfg *vcfg)
404 {
405         struct hns3_vport_vtag_tx_cfg_cmd *req;
406         struct hns3_cmd_desc desc;
407         struct hns3_hw *hw = &hns->hw;
408         uint16_t vport_id;
409         uint8_t bitmap;
410         int ret;
411
412         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
413
414         req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
415         req->def_vlan_tag1 = vcfg->default_tag1;
416         req->def_vlan_tag2 = vcfg->default_tag2;
417         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
418                      vcfg->accept_tag1 ? 1 : 0);
419         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
420                      vcfg->accept_untag1 ? 1 : 0);
421         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
422                      vcfg->accept_tag2 ? 1 : 0);
423         hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
424                      vcfg->accept_untag2 ? 1 : 0);
425         hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
426                      vcfg->insert_tag1_en ? 1 : 0);
427         hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
428                      vcfg->insert_tag2_en ? 1 : 0);
429         hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
430
431         /*
432          * In the current version, VF is not supported when the PF is driven
433          * by the DPDK driver; the PF-related vf_id is 0, so only vport_id 0
434          * needs to be configured.
435          */
436         vport_id = 0;
437         req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
438         bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
439         req->vf_bitmap[req->vf_offset] = bitmap;
440
441         ret = hns3_cmd_send(hw, &desc, 1);
442         if (ret)
443                 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
444
445         return ret;
446 }
447
448 static int
449 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
450                      uint16_t pvid)
451 {
452         struct hns3_hw *hw = &hns->hw;
453         struct hns3_tx_vtag_cfg txvlan_cfg;
454         int ret;
455
456         if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
457                 txvlan_cfg.accept_tag1 = true;
458                 txvlan_cfg.insert_tag1_en = false;
459                 txvlan_cfg.default_tag1 = 0;
460         } else {
461                 txvlan_cfg.accept_tag1 = false;
462                 txvlan_cfg.insert_tag1_en = true;
463                 txvlan_cfg.default_tag1 = pvid;
464         }
465
466         txvlan_cfg.accept_untag1 = true;
467         txvlan_cfg.accept_tag2 = true;
468         txvlan_cfg.accept_untag2 = true;
469         txvlan_cfg.insert_tag2_en = false;
470         txvlan_cfg.default_tag2 = 0;
471
472         ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
473         if (ret) {
474                 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
475                          ret);
476                 return ret;
477         }
478
479         hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
480         return ret;
481 }
482
483 static void
484 hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on)
485 {
486         struct hns3_pf *pf = &hns->pf;
487
488         pf->port_base_vlan_cfg.state = on ?
489             HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
490
491         pf->port_base_vlan_cfg.pvid = pvid;
492 }
493
494 static void
495 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
496 {
497         struct hns3_user_vlan_table *vlan_entry;
498         struct hns3_pf *pf = &hns->pf;
499
500         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
501                 if (vlan_entry->hd_tbl_status)
502                         hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
503
504                 vlan_entry->hd_tbl_status = false;
505         }
506
507         if (is_del_list) {
508                 vlan_entry = LIST_FIRST(&pf->vlan_list);
509                 while (vlan_entry) {
510                         LIST_REMOVE(vlan_entry, next);
511                         rte_free(vlan_entry);
512                         vlan_entry = LIST_FIRST(&pf->vlan_list);
513                 }
514         }
515 }
516
517 static void
518 hns3_add_all_vlan_table(struct hns3_adapter *hns)
519 {
520         struct hns3_user_vlan_table *vlan_entry;
521         struct hns3_pf *pf = &hns->pf;
522
523         LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
524                 if (!vlan_entry->hd_tbl_status)
525                         hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
526
527                 vlan_entry->hd_tbl_status = true;
528         }
529 }
530
531 static void
532 hns3_remove_all_vlan_table(struct hns3_adapter *hns)
533 {
534         struct hns3_hw *hw = &hns->hw;
535         struct hns3_pf *pf = &hns->pf;
536         int ret;
537
538         hns3_rm_all_vlan_table(hns, true);
539         if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) {
540                 ret = hns3_set_port_vlan_filter(hns,
541                                                 pf->port_base_vlan_cfg.pvid, 0);
542                 if (ret) {
543                         hns3_err(hw, "Failed to remove all vlan table, ret =%d",
544                                  ret);
545                         return;
546                 }
547         }
548 }
549
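/*
 * Resynchronize the hardware vlan filter when the PVID changes. When port
 * based vlan is being enabled, the old pvid entry (if any) and all soft-table
 * entries are removed from hardware and only the new pvid is programmed.
 * When it is being disabled, the pvid entry is removed and, if new_pvid still
 * matches the stored pvid, the soft-table entries are written back.
 */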
550 static int
551 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
552                                 uint16_t port_base_vlan_state,
553                                 uint16_t new_pvid, uint16_t old_pvid)
554 {
555         struct hns3_pf *pf = &hns->pf;
556         struct hns3_hw *hw = &hns->hw;
557         int ret = 0;
558
559         if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
560                 if (old_pvid != HNS3_INVLID_PVID && old_pvid != 0) {
561                         ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
562                         if (ret) {
563                                 hns3_err(hw,
564                                          "Failed to clear old pvid filter, ret =%d",
565                                          ret);
566                                 return ret;
567                         }
568                 }
569
570                 hns3_rm_all_vlan_table(hns, false);
571                 return hns3_set_port_vlan_filter(hns, new_pvid, 1);
572         }
573
574         if (new_pvid != 0) {
575                 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
576                 if (ret) {
577                         hns3_err(hw, "Failed to set port vlan filter, ret =%d",
578                                  ret);
579                         return ret;
580                 }
581         }
582
583         if (new_pvid == pf->port_base_vlan_cfg.pvid)
584                 hns3_add_all_vlan_table(hns);
585
586         return ret;
587 }
588
589 static int
590 hns3_en_rx_strip_all(struct hns3_adapter *hns, int on)
591 {
592         struct hns3_rx_vtag_cfg rx_vlan_cfg;
593         struct hns3_hw *hw = &hns->hw;
594         bool rx_strip_en;
595         int ret;
596
597         rx_strip_en = on ? true : false;
598         rx_vlan_cfg.strip_tag1_en = rx_strip_en;
599         rx_vlan_cfg.strip_tag2_en = rx_strip_en;
600         rx_vlan_cfg.vlan1_vlan_prionly = false;
601         rx_vlan_cfg.vlan2_vlan_prionly = false;
602         rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en;
603
604         ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
605         if (ret) {
606                 hns3_err(hw, "enable strip rx failed, ret =%d", ret);
607                 return ret;
608         }
609
610         hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
611         return ret;
612 }
613
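/*
 * Apply a PVID change in order: program Tx vlan insertion, switch Rx
 * stripping for all tags, update the hardware vlan filter entries and
 * finally record the new port based vlan state in the PF structure.
 */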
614 static int
615 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
616 {
617         struct hns3_pf *pf = &hns->pf;
618         struct hns3_hw *hw = &hns->hw;
619         uint16_t port_base_vlan_state;
620         uint16_t old_pvid;
621         int ret;
622
623         if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) {
624                 if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID)
625                         hns3_warn(hw, "Invalid operation! As current pvid set "
626                                   "is %u, disable pvid %u is invalid",
627                                   pf->port_base_vlan_cfg.pvid, pvid);
628                 return 0;
629         }
630
631         port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
632                                     HNS3_PORT_BASE_VLAN_DISABLE;
633         ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
634         if (ret) {
635                 hns3_err(hw, "Failed to config tx vlan, ret =%d", ret);
636                 return ret;
637         }
638
639         ret = hns3_en_rx_strip_all(hns, on);
640         if (ret) {
641                 hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret);
642                 return ret;
643         }
644
645         if (pvid == HNS3_INVLID_PVID)
646                 goto out;
647         old_pvid = pf->port_base_vlan_cfg.pvid;
648         ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
649                                               old_pvid);
650         if (ret) {
651                 hns3_err(hw, "Failed to update vlan filter entries, ret =%d",
652                          ret);
653                 return ret;
654         }
655
656 out:
657         hns3_store_port_base_vlan_info(hns, pvid, on);
658         return ret;
659 }
660
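/*
 * Presumably registered later in this file as the .vlan_pvid_set dev op, so
 * it is reached through rte_eth_dev_set_vlan_pvid(port_id, pvid, on) as well
 * as from hns3_dev_configure_vlan() below.
 */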
661 static int
662 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
663 {
664         struct hns3_adapter *hns = dev->data->dev_private;
665         struct hns3_hw *hw = &hns->hw;
666         int ret;
667
668         rte_spinlock_lock(&hw->lock);
669         ret = hns3_vlan_pvid_configure(hns, pvid, on);
670         rte_spinlock_unlock(&hw->lock);
671         return ret;
672 }
673
674 static void
675 init_port_base_vlan_info(struct hns3_hw *hw)
676 {
677         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
678         struct hns3_pf *pf = &hns->pf;
679
680         pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
681         pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;
682 }
683
684 static int
685 hns3_default_vlan_config(struct hns3_adapter *hns)
686 {
687         struct hns3_hw *hw = &hns->hw;
688         int ret;
689
690         ret = hns3_set_port_vlan_filter(hns, 0, 1);
691         if (ret)
692                 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
693         return ret;
694 }
695
696 static int
697 hns3_init_vlan_config(struct hns3_adapter *hns)
698 {
699         struct hns3_hw *hw = &hns->hw;
700         int ret;
701
702         /*
703          * This function can be called both at initialization and during the
704          * reset process. In the reset case, it means the hardware has been
705          * reset successfully and we need to restore the hardware
706          * configuration to ensure that it remains unchanged before and
707          * after the reset.
708          */
709         if (rte_atomic16_read(&hw->reset.resetting) == 0)
710                 init_port_base_vlan_info(hw);
711
712         ret = hns3_enable_vlan_filter(hns, true);
713         if (ret) {
714                 hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
715                 return ret;
716         }
717
718         ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
719                                        RTE_ETHER_TYPE_VLAN);
720         if (ret) {
721                 hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
722                 return ret;
723         }
724
725         /*
726          * In the reinit dev stage of the reset process, the following
727          * vlan-related configurations may differ from those at initialization;
728          * they will be restored to hardware in hns3_restore_vlan_table and
729          * hns3_restore_vlan_conf later.
730          */
731         if (rte_atomic16_read(&hw->reset.resetting) == 0) {
732                 ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
733                 if (ret) {
734                         hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
735                         return ret;
736                 }
737
738                 ret = hns3_en_hw_strip_rxvtag(hns, false);
739                 if (ret) {
740                         hns3_err(hw, "rx strip configure fail in pf, ret =%d",
741                                  ret);
742                         return ret;
743                 }
744         }
745
746         return hns3_default_vlan_config(hns);
747 }
748
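/*
 * Apply the vlan settings carried in dev_conf at configure time: the strip
 * offload from rxmode and the legacy pvid fields from txmode. A minimal
 * caller-side sketch (assumed usage, not taken from this file):
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.txmode.hw_vlan_insert_pvid = 1;
 *     conf.txmode.pvid = 100;
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */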
749 static int
750 hns3_dev_configure_vlan(struct rte_eth_dev *dev)
751 {
752         struct hns3_adapter *hns = dev->data->dev_private;
753         struct rte_eth_dev_data *data = dev->data;
754         struct rte_eth_txmode *txmode;
755         struct hns3_hw *hw = &hns->hw;
756         int ret;
757
758         txmode = &data->dev_conf.txmode;
759         if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
760                 hns3_warn(hw,
761                           "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
762                           "configuration is not supported! Ignore these two "
763                           "parameters: hw_vlan_reject_tagged(%d), "
764                           "hw_vlan_reject_untagged(%d)",
765                           txmode->hw_vlan_reject_tagged,
766                           txmode->hw_vlan_reject_untagged);
767
768         /* Apply vlan offload setting */
769         ret = hns3_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
770         if (ret) {
771                 hns3_err(hw, "dev config vlan Strip failed, ret =%d", ret);
772                 return ret;
773         }
774
775         /* Apply pvid setting */
776         ret = hns3_vlan_pvid_set(dev, txmode->pvid,
777                                  txmode->hw_vlan_insert_pvid);
778         if (ret)
779                 hns3_err(hw, "dev config vlan pvid(%d) failed, ret =%d",
780                          txmode->pvid, ret);
781
782         return ret;
783 }
784
785 static int
786 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
787                 unsigned int tso_mss_max)
788 {
789         struct hns3_cfg_tso_status_cmd *req;
790         struct hns3_cmd_desc desc;
791         uint16_t tso_mss;
792
793         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
794
795         req = (struct hns3_cfg_tso_status_cmd *)desc.data;
796
797         tso_mss = 0;
798         hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
799                        tso_mss_min);
800         req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
801
802         tso_mss = 0;
803         hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
804                        tso_mss_max);
805         req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
806
807         return hns3_cmd_send(hw, &desc, 1);
808 }
809
810 int
811 hns3_config_gro(struct hns3_hw *hw, bool en)
812 {
813         struct hns3_cfg_gro_status_cmd *req;
814         struct hns3_cmd_desc desc;
815         int ret;
816
817         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
818         req = (struct hns3_cfg_gro_status_cmd *)desc.data;
819
820         req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
821
822         ret = hns3_cmd_send(hw, &desc, 1);
823         if (ret)
824                 hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret);
825
826         return ret;
827 }
828
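/*
 * UMV here stands for the unicast MAC-vlan entry space the PF reserves in the
 * shared MAC table (an assumption based on the field names; the firmware
 * command only exchanges a size). is_alloc selects between allocating
 * pf->wanted_umv_size entries and freeing the previously allocated space.
 */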
829 static int
830 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
831                    uint16_t *allocated_size, bool is_alloc)
832 {
833         struct hns3_umv_spc_alc_cmd *req;
834         struct hns3_cmd_desc desc;
835         int ret;
836
837         req = (struct hns3_umv_spc_alc_cmd *)desc.data;
838         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
839         hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
840         req->space_size = rte_cpu_to_le_32(space_size);
841
842         ret = hns3_cmd_send(hw, &desc, 1);
843         if (ret) {
844                 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
845                              is_alloc ? "allocate" : "free", ret);
846                 return ret;
847         }
848
849         if (is_alloc && allocated_size)
850                 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
851
852         return 0;
853 }
854
855 static int
856 hns3_init_umv_space(struct hns3_hw *hw)
857 {
858         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
859         struct hns3_pf *pf = &hns->pf;
860         uint16_t allocated_size = 0;
861         int ret;
862
863         ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
864                                  true);
865         if (ret)
866                 return ret;
867
868         if (allocated_size < pf->wanted_umv_size)
869                 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
870                              pf->wanted_umv_size, allocated_size);
871
872         pf->max_umv_size = (!!allocated_size) ? allocated_size :
873                                                 pf->wanted_umv_size;
874         pf->used_umv_size = 0;
875         return 0;
876 }
877
878 static int
879 hns3_uninit_umv_space(struct hns3_hw *hw)
880 {
881         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
882         struct hns3_pf *pf = &hns->pf;
883         int ret;
884
885         if (pf->max_umv_size == 0)
886                 return 0;
887
888         ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
889         if (ret)
890                 return ret;
891
892         pf->max_umv_size = 0;
893
894         return 0;
895 }
896
897 static bool
898 hns3_is_umv_space_full(struct hns3_hw *hw)
899 {
900         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
901         struct hns3_pf *pf = &hns->pf;
902         bool is_full;
903
904         is_full = (pf->used_umv_size >= pf->max_umv_size);
905
906         return is_full;
907 }
908
909 static void
910 hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
911 {
912         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
913         struct hns3_pf *pf = &hns->pf;
914
915         if (is_free) {
916                 if (pf->used_umv_size > 0)
917                         pf->used_umv_size--;
918         } else
919                 pf->used_umv_size++;
920 }
921
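/*
 * Pack a MAC address into a mac_vlan table entry: bytes 0-3 go into
 * mac_addr_hi32 (byte 0 in the least significant position) and bytes 4-5 into
 * mac_addr_lo16. For multicast entries the entry_type and mc_mac_en bits are
 * set accordingly.
 */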
922 static void
923 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
924                       const uint8_t *addr, bool is_mc)
925 {
926         const unsigned char *mac_addr = addr;
927         uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
928                             ((uint32_t)mac_addr[2] << 16) |
929                             ((uint32_t)mac_addr[1] << 8) |
930                             (uint32_t)mac_addr[0];
931         uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
932
933         hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
934         if (is_mc) {
935                 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
936                 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
937                 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
938         }
939
940         new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
941         new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
942 }
943
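/*
 * Translate a mac_vlan command completion into an errno: a non-zero cmdq_resp
 * means the command itself failed (-EIO); otherwise resp_code is interpreted
 * per opcode, e.g. overflow of the unicast/multicast space maps to -ENOSPC
 * and a missed lookup/remove maps to -ENOENT.
 */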
944 static int
945 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
946                              uint8_t resp_code,
947                              enum hns3_mac_vlan_tbl_opcode op)
948 {
949         if (cmdq_resp) {
950                 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
951                          cmdq_resp);
952                 return -EIO;
953         }
954
955         if (op == HNS3_MAC_VLAN_ADD) {
956                 if (resp_code == 0 || resp_code == 1) {
957                         return 0;
958                 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
959                         hns3_err(hw, "add mac addr failed for uc_overflow");
960                         return -ENOSPC;
961                 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
962                         hns3_err(hw, "add mac addr failed for mc_overflow");
963                         return -ENOSPC;
964                 }
965
966                 hns3_err(hw, "add mac addr failed for undefined, code=%u",
967                          resp_code);
968                 return -EIO;
969         } else if (op == HNS3_MAC_VLAN_REMOVE) {
970                 if (resp_code == 0) {
971                         return 0;
972                 } else if (resp_code == 1) {
973                         hns3_dbg(hw, "remove mac addr failed for miss");
974                         return -ENOENT;
975                 }
976
977                 hns3_err(hw, "remove mac addr failed for undefined, code=%u",
978                          resp_code);
979                 return -EIO;
980         } else if (op == HNS3_MAC_VLAN_LKUP) {
981                 if (resp_code == 0) {
982                         return 0;
983                 } else if (resp_code == 1) {
984                         hns3_dbg(hw, "lookup mac addr failed for miss");
985                         return -ENOENT;
986                 }
987
988                 hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
989                          resp_code);
990                 return -EIO;
991         }
992
993         hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
994                  op);
995
996         return -EINVAL;
997 }
998
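/*
 * A multicast entry spans HNS3_MC_MAC_VLAN_ADD_DESC_NUM descriptors chained
 * with HNS3_CMD_FLAG_NEXT, while a unicast entry fits in a single descriptor;
 * in both cases the completion status is read back from desc[0].
 */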
999 static int
1000 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
1001                          struct hns3_mac_vlan_tbl_entry_cmd *req,
1002                          struct hns3_cmd_desc *desc, bool is_mc)
1003 {
1004         uint8_t resp_code;
1005         uint16_t retval;
1006         int ret;
1007
1008         hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
1009         if (is_mc) {
1010                 desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1011                 memcpy(desc[0].data, req,
1012                            sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1013                 hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
1014                                           true);
1015                 desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1016                 hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
1017                                           true);
1018                 ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
1019         } else {
1020                 memcpy(desc[0].data, req,
1021                        sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1022                 ret = hns3_cmd_send(hw, desc, 1);
1023         }
1024         if (ret) {
1025                 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
1026                          ret);
1027                 return ret;
1028         }
1029         resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1030         retval = rte_le_to_cpu_16(desc[0].retval);
1031
1032         return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1033                                             HNS3_MAC_VLAN_LKUP);
1034 }
1035
1036 static int
1037 hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
1038                       struct hns3_mac_vlan_tbl_entry_cmd *req,
1039                       struct hns3_cmd_desc *mc_desc)
1040 {
1041         uint8_t resp_code;
1042         uint16_t retval;
1043         int cfg_status;
1044         int ret;
1045
1046         if (mc_desc == NULL) {
1047                 struct hns3_cmd_desc desc;
1048
1049                 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
1050                 memcpy(desc.data, req,
1051                        sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1052                 ret = hns3_cmd_send(hw, &desc, 1);
1053                 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1054                 retval = rte_le_to_cpu_16(desc.retval);
1055
1056                 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1057                                                           HNS3_MAC_VLAN_ADD);
1058         } else {
1059                 hns3_cmd_reuse_desc(&mc_desc[0], false);
1060                 mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1061                 hns3_cmd_reuse_desc(&mc_desc[1], false);
1062                 mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1063                 hns3_cmd_reuse_desc(&mc_desc[2], false);
1064                 mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
1065                 memcpy(mc_desc[0].data, req,
1066                        sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1067                 mc_desc[0].retval = 0;
1068                 ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
1069                 resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
1070                 retval = rte_le_to_cpu_16(mc_desc[0].retval);
1071
1072                 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1073                                                           HNS3_MAC_VLAN_ADD);
1074         }
1075
1076         if (ret) {
1077                 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
1078                 return ret;
1079         }
1080
1081         return cfg_status;
1082 }
1083
1084 static int
1085 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1086                          struct hns3_mac_vlan_tbl_entry_cmd *req)
1087 {
1088         struct hns3_cmd_desc desc;
1089         uint8_t resp_code;
1090         uint16_t retval;
1091         int ret;
1092
1093         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1094
1095         memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1096
1097         ret = hns3_cmd_send(hw, &desc, 1);
1098         if (ret) {
1099                 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1100                 return ret;
1101         }
1102         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1103         retval = rte_le_to_cpu_16(desc.retval);
1104
1105         return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1106                                             HNS3_MAC_VLAN_REMOVE);
1107 }
1108
1109 static int
1110 hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1111 {
1112         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1113         struct hns3_mac_vlan_tbl_entry_cmd req;
1114         struct hns3_pf *pf = &hns->pf;
1115         struct hns3_cmd_desc desc;
1116         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1117         uint16_t egress_port = 0;
1118         uint8_t vf_id;
1119         int ret;
1120
1121         /* check if mac addr is valid */
1122         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1123                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1124                                       mac_addr);
1125                 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
1126                          mac_str);
1127                 return -EINVAL;
1128         }
1129
1130         memset(&req, 0, sizeof(req));
1131
1132         /*
1133          * In the current version, VF is not supported when the PF is driven
1134          * by the DPDK driver; the PF-related vf_id is 0, so only vf_id 0
1135          * needs to be configured.
1136          */
1137         vf_id = 0;
1138         hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
1139                        HNS3_MAC_EPORT_VFID_S, vf_id);
1140
1141         req.egress_port = rte_cpu_to_le_16(egress_port);
1142
1143         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1144
1145         /*
1146          * Look up the mac address in the mac_vlan table, and add it if the
1147          * entry does not exist yet. Duplicate unicast entries are not
1148          * allowed in the mac vlan table.
1149          */
1150         ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, false);
1151         if (ret == -ENOENT) {
1152                 if (!hns3_is_umv_space_full(hw)) {
1153                         ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
1154                         if (!ret)
1155                                 hns3_update_umv_space(hw, false);
1156                         return ret;
1157                 }
1158
1159                 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
1160
1161                 return -ENOSPC;
1162         }
1163
1164         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
1165
1166         /* check if we just hit a duplicate entry */
1167         if (ret == 0) {
1168                 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
1169                 return 0;
1170         }
1171
1172         hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
1173                  mac_str);
1174
1175         return ret;
1176 }
1177
1178 static int
1179 hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1180                   uint32_t idx, __attribute__ ((unused)) uint32_t pool)
1181 {
1182         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1183         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1184         int ret;
1185
1186         rte_spinlock_lock(&hw->lock);
1187         ret = hns3_add_uc_addr_common(hw, mac_addr);
1188         if (ret) {
1189                 rte_spinlock_unlock(&hw->lock);
1190                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1191                                       mac_addr);
1192                 hns3_err(hw, "Failed to add mac addr(%s): %d", mac_str, ret);
1193                 return ret;
1194         }
1195
1196         if (idx == 0)
1197                 hw->mac.default_addr_setted = true;
1198         rte_spinlock_unlock(&hw->lock);
1199
1200         return ret;
1201 }
1202
1203 static int
1204 hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1205 {
1206         struct hns3_mac_vlan_tbl_entry_cmd req;
1207         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1208         int ret;
1209
1210         /* check if mac addr is valid */
1211         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1212                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1213                                       mac_addr);
1214                 hns3_err(hw, "Remove unicast mac addr err! addr(%s) invalid",
1215                          mac_str);
1216                 return -EINVAL;
1217         }
1218
1219         memset(&req, 0, sizeof(req));
1220         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1221         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1222         ret = hns3_remove_mac_vlan_tbl(hw, &req);
1223         if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
1224                 return 0;
1225         else if (ret == 0)
1226                 hns3_update_umv_space(hw, true);
1227
1228         return ret;
1229 }
1230
1231 static void
1232 hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
1233 {
1234         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1235         /* index will be checked by upper level rte interface */
1236         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
1237         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1238         int ret;
1239
1240         rte_spinlock_lock(&hw->lock);
1241         ret = hns3_remove_uc_addr_common(hw, mac_addr);
1242         if (ret) {
1243                 rte_spinlock_unlock(&hw->lock);
1244                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1245                                       mac_addr);
1246                 hns3_err(hw, "Failed to remove mac addr(%s): %d", mac_str, ret);
1247                 return;
1248         }
1249
1250         if (idx == 0)
1251                 hw->mac.default_addr_setted = false;
1252         rte_spinlock_unlock(&hw->lock);
1253 }
1254
1255 static int
1256 hns3_set_default_mac_addr(struct rte_eth_dev *dev,
1257                           struct rte_ether_addr *mac_addr)
1258 {
1259         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1260         struct rte_ether_addr *oaddr;
1261         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1262         bool default_addr_setted;
1263         bool rm_succes = false;
1264         int ret, ret_val;
1265
1266         /* check if mac addr is valid */
1267         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1268                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1269                                       mac_addr);
1270                 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid",
1271                          mac_str);
1272                 return -EINVAL;
1273         }
1274
1275         oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
1276         default_addr_setted = hw->mac.default_addr_setted;
1277         if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
1278                 return 0;
1279
1280         rte_spinlock_lock(&hw->lock);
1281         if (default_addr_setted) {
1282                 ret = hns3_remove_uc_addr_common(hw, oaddr);
1283                 if (ret) {
1284                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1285                                               oaddr);
1286                         hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
1287                                   mac_str, ret);
1288                         rm_succes = false;
1289                 } else
1290                         rm_succes = true;
1291         }
1292
1293         ret = hns3_add_uc_addr_common(hw, mac_addr);
1294         if (ret) {
1295                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1296                                       mac_addr);
1297                 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
1298                 goto err_add_uc_addr;
1299         }
1300
1301         ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
1302         if (ret) {
1303                 hns3_err(hw, "Failed to configure mac pause address: %d", ret);
1304                 goto err_pause_addr_cfg;
1305         }
1306
1307         rte_ether_addr_copy(mac_addr,
1308                             (struct rte_ether_addr *)hw->mac.mac_addr);
1309         hw->mac.default_addr_setted = true;
1310         rte_spinlock_unlock(&hw->lock);
1311
1312         return 0;
1313
1314 err_pause_addr_cfg:
1315         ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
1316         if (ret_val) {
1317                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1318                                       mac_addr);
1319                 hns3_warn(hw,
1320                           "Failed to roll back to del setted mac addr(%s): %d",
1321                           mac_str, ret_val);
1322         }
1323
1324 err_add_uc_addr:
1325         if (rm_succes) {
1326                 ret_val = hns3_add_uc_addr_common(hw, oaddr);
1327                 if (ret_val) {
1328                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1329                                               oaddr);
1330                         hns3_warn(hw,
1331                                   "Failed to restore old uc mac addr(%s): %d",
1332                                   mac_str, ret_val);
1333                         hw->mac.default_addr_setted = false;
1334                 }
1335         }
1336         rte_spinlock_unlock(&hw->lock);
1337
1338         return ret;
1339 }
1340
1341 static int
1342 hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
1343 {
1344         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1345         struct hns3_hw *hw = &hns->hw;
1346         struct rte_ether_addr *addr;
1347         int err = 0;
1348         int ret;
1349         int i;
1350
1351         for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
1352                 addr = &hw->data->mac_addrs[i];
1353                 if (!rte_is_valid_assigned_ether_addr(addr))
1354                         continue;
1355                 if (del)
1356                         ret = hns3_remove_uc_addr_common(hw, addr);
1357                 else
1358                         ret = hns3_add_uc_addr_common(hw, addr);
1359                 if (ret) {
1360                         err = ret;
1361                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1362                                               addr);
1363                         hns3_dbg(hw,
1364                                  "Failed to %s mac addr(%s). ret:%d i:%d",
1365                                  del ? "remove" : "restore", mac_str, ret, i);
1366                 }
1367         }
1368         return err;
1369 }
1370
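/*
 * Set or clear this vfid's bit in the multicast entry's VF bitmap, which is
 * spread across the command descriptors: VFs 0-191 live in desc[1] (six
 * 32-bit words) and the remainder in desc[2].
 */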
1371 static void
1372 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1373 {
1374 #define HNS3_VF_NUM_IN_FIRST_DESC 192
1375         uint8_t word_num;
1376         uint8_t bit_num;
1377
1378         if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1379                 word_num = vfid / 32;
1380                 bit_num = vfid % 32;
1381                 if (clr)
1382                         desc[1].data[word_num] &=
1383                             rte_cpu_to_le_32(~(1UL << bit_num));
1384                 else
1385                         desc[1].data[word_num] |=
1386                             rte_cpu_to_le_32(1UL << bit_num);
1387         } else {
1388                 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
1389                 bit_num = vfid % 32;
1390                 if (clr)
1391                         desc[2].data[word_num] &=
1392                             rte_cpu_to_le_32(~(1UL << bit_num));
1393                 else
1394                         desc[2].data[word_num] |=
1395                             rte_cpu_to_le_32(1UL << bit_num);
1396         }
1397 }
1398
1399 static int
1400 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1401 {
1402         struct hns3_mac_vlan_tbl_entry_cmd req;
1403         struct hns3_cmd_desc desc[3];
1404         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1405         uint8_t vf_id;
1406         int ret;
1407
1408         /* Check if mac addr is valid */
1409         if (!rte_is_multicast_ether_addr(mac_addr)) {
1410                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1411                                       mac_addr);
1412                 hns3_err(hw, "Failed to add mc mac addr, addr(%s) invalid",
1413                          mac_str);
1414                 return -EINVAL;
1415         }
1416
1417         memset(&req, 0, sizeof(req));
1418         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1419         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1420         ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
1421         if (ret) {
1422                 /* This mac addr does not exist, add a new entry for it */
1423                 memset(desc[0].data, 0, sizeof(desc[0].data));
1424                 memset(desc[1].data, 0, sizeof(desc[0].data));
1425                 memset(desc[2].data, 0, sizeof(desc[0].data));
1426         }
1427
1428         /*
1429          * In the current version, VF is not supported when the PF is driven
1430          * by the DPDK driver; the PF-related vf_id is 0, so only vf_id 0
1431          * needs to be configured.
1432          */
1433         vf_id = 0;
1434         hns3_update_desc_vfid(desc, vf_id, false);
1435         ret = hns3_add_mac_vlan_tbl(hw, &req, desc);
1436         if (ret) {
1437                 if (ret == -ENOSPC)
1438                         hns3_err(hw, "mc mac vlan table is full");
1439                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1440                                       mac_addr);
1441                 hns3_err(hw, "Failed to add mc mac addr(%s): %d", mac_str, ret);
1442         }
1443
1444         return ret;
1445 }
1446
1447 static int
1448 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1449 {
1450         struct hns3_mac_vlan_tbl_entry_cmd req;
1451         struct hns3_cmd_desc desc[3];
1452         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1453         uint8_t vf_id;
1454         int ret;
1455
1456         /* Check if mac addr is valid */
1457         if (!rte_is_multicast_ether_addr(mac_addr)) {
1458                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1459                                       mac_addr);
1460                 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
1461                          mac_str);
1462                 return -EINVAL;
1463         }
1464
1465         memset(&req, 0, sizeof(req));
1466         hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1467         hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1468         ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
1469         if (ret == 0) {
1470                 /*
1471                  * This mac addr exists; remove this handle's VFID from it.
1472                  * In the current version, VF is not supported when the PF is
1473                  * driven by the DPDK driver; the PF-related vf_id is 0, so
1474                  * only vf_id 0 needs to be configured.
1475                  */
1476                 vf_id = 0;
1477                 hns3_update_desc_vfid(desc, vf_id, true);
1478
1479                 /* All the vfids are zero, so this entry needs to be deleted */
1480                 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1481         } else if (ret == -ENOENT) {
1482                 /* This mac addr doesn't exist. */
1483                 return 0;
1484         }
1485
1486         if (ret) {
1487                 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1488                                       mac_addr);
1489                 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
1490         }
1491
1492         return ret;
1493 }
1494
1495 static int
1496 hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
1497                            struct rte_ether_addr *mc_addr_set,
1498                            uint32_t nb_mc_addr)
1499 {
1500         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1501         struct rte_ether_addr *addr;
1502         uint32_t i;
1503         uint32_t j;
1504
1505         if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
1506                 hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
1507                          "invalid. valid range: 0~%d",
1508                          nb_mc_addr, HNS3_MC_MACADDR_NUM);
1509                 return -EINVAL;
1510         }
1511
1512         /* Check if input mac addresses are valid */
1513         for (i = 0; i < nb_mc_addr; i++) {
1514                 addr = &mc_addr_set[i];
1515                 if (!rte_is_multicast_ether_addr(addr)) {
1516                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1517                                               addr);
1518                         hns3_err(hw,
1519                                  "Failed to set mc mac addr, addr(%s) invalid.",
1520                                  mac_str);
1521                         return -EINVAL;
1522                 }
1523
1524                 /* Check if there are duplicate addresses */
1525                 for (j = i + 1; j < nb_mc_addr; j++) {
1526                         if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
1527                                 rte_ether_format_addr(mac_str,
1528                                                       RTE_ETHER_ADDR_FMT_SIZE,
1529                                                       addr);
1530                                 hns3_err(hw, "Failed to set mc mac addr, "
1531                                          "addrs invalid. two same addrs(%s).",
1532                                          mac_str);
1533                                 return -EINVAL;
1534                         }
1535                 }
1536         }
1537
1538         return 0;
1539 }
1540
1541 static void
1542 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw,
1543                            struct rte_ether_addr *mc_addr_set,
1544                            int mc_addr_num,
1545                            struct rte_ether_addr *reserved_addr_list,
1546                            int *reserved_addr_num,
1547                            struct rte_ether_addr *add_addr_list,
1548                            int *add_addr_num,
1549                            struct rte_ether_addr *rm_addr_list,
1550                            int *rm_addr_num)
1551 {
1552         struct rte_ether_addr *addr;
1553         int current_addr_num;
1554         int reserved_num = 0;
1555         int add_num = 0;
1556         int rm_num = 0;
1557         int num;
1558         int i;
1559         int j;
1560         bool same_addr;
1561
1562         /* Calculate the mc mac address list that should be removed */
1563         current_addr_num = hw->mc_addrs_num;
1564         for (i = 0; i < current_addr_num; i++) {
1565                 addr = &hw->mc_addrs[i];
1566                 same_addr = false;
1567                 for (j = 0; j < mc_addr_num; j++) {
1568                         if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
1569                                 same_addr = true;
1570                                 break;
1571                         }
1572                 }
1573
1574                 if (!same_addr) {
1575                         rte_ether_addr_copy(addr, &rm_addr_list[rm_num]);
1576                         rm_num++;
1577                 } else {
1578                         rte_ether_addr_copy(addr,
1579                                             &reserved_addr_list[reserved_num]);
1580                         reserved_num++;
1581                 }
1582         }
1583
1584         /* Calculate the mc mac address list that should be added */
1585         for (i = 0; i < mc_addr_num; i++) {
1586                 addr = &mc_addr_set[i];
1587                 same_addr = false;
1588                 for (j = 0; j < current_addr_num; j++) {
1589                         if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) {
1590                                 same_addr = true;
1591                                 break;
1592                         }
1593                 }
1594
1595                 if (!same_addr) {
1596                         rte_ether_addr_copy(addr, &add_addr_list[add_num]);
1597                         add_num++;
1598                 }
1599         }
1600
1601         /* Reorder the mc mac address list maintained by driver */
1602         for (i = 0; i < reserved_num; i++)
1603                 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]);
1604
1605         for (i = 0; i < rm_num; i++) {
1606                 num = reserved_num + i;
1607                 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]);
1608         }
1609
1610         *reserved_addr_num = reserved_num;
1611         *add_addr_num = add_num;
1612         *rm_addr_num = rm_num;
1613 }
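
/*
 * Worked example of the calculation above: if the driver currently holds
 * mc_addrs = {A, B, C} and the application passes mc_addr_set = {B, C, D},
 * then rm_addr_list = {A}, reserved_addr_list = {B, C} and
 * add_addr_list = {D}; hw->mc_addrs is reordered to {B, C, A}, with the
 * reserved entries first and the entries to be removed at the tail.
 */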
1614
1615 static int
1616 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
1617                           struct rte_ether_addr *mc_addr_set,
1618                           uint32_t nb_mc_addr)
1619 {
1620         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1621         struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM];
1622         struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM];
1623         struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM];
1624         struct rte_ether_addr *addr;
1625         int reserved_addr_num;
1626         int add_addr_num;
1627         int rm_addr_num;
1628         int mc_addr_num;
1629         int num;
1630         int ret;
1631         int i;
1632
1633         /* Check if input parameters are valid */
1634         ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
1635         if (ret)
1636                 return ret;
1637
1638         rte_spinlock_lock(&hw->lock);
1639
1640         /*
1641          * Calculate the mc mac address lists that should be removed and
1642          * added, and reorder the mc mac address list maintained by the driver.
1643          */
1644         mc_addr_num = (int)nb_mc_addr;
1645         hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num,
1646                                    reserved_addr_list, &reserved_addr_num,
1647                                    add_addr_list, &add_addr_num,
1648                                    rm_addr_list, &rm_addr_num);
1649
1650         /* Remove mc mac addresses */
1651         for (i = 0; i < rm_addr_num; i++) {
1652                 num = rm_addr_num - i - 1;
1653                 addr = &rm_addr_list[num];
1654                 ret = hns3_remove_mc_addr(hw, addr);
1655                 if (ret) {
1656                         rte_spinlock_unlock(&hw->lock);
1657                         return ret;
1658                 }
1659                 hw->mc_addrs_num--;
1660         }
1661
1662         /* Add mc mac addresses */
1663         for (i = 0; i < add_addr_num; i++) {
1664                 addr = &add_addr_list[i];
1665                 ret = hns3_add_mc_addr(hw, addr);
1666                 if (ret) {
1667                         rte_spinlock_unlock(&hw->lock);
1668                         return ret;
1669                 }
1670
1671                 num = reserved_addr_num + i;
1672                 rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
1673                 hw->mc_addrs_num++;
1674         }
1675         rte_spinlock_unlock(&hw->lock);
1676
1677         return 0;
1678 }
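
/*
 * This handler is intended to back the .set_mc_addr_list ethdev op. A
 * minimal application-side sketch (port_id and the address array are
 * illustrative only):
 *
 *     struct rte_ether_addr mc[2];   // filled with multicast addresses
 *     int ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *
 * The whole update runs under hw->lock, and a failure while removing or
 * adding an entry returns immediately with hw->mc_addrs only partially
 * updated.
 */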
1679
1680 static int
1681 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
1682 {
1683         char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1684         struct hns3_hw *hw = &hns->hw;
1685         struct rte_ether_addr *addr;
1686         int err = 0;
1687         int ret;
1688         int i;
1689
1690         for (i = 0; i < hw->mc_addrs_num; i++) {
1691                 addr = &hw->mc_addrs[i];
1692                 if (!rte_is_multicast_ether_addr(addr))
1693                         continue;
1694                 if (del)
1695                         ret = hns3_remove_mc_addr(hw, addr);
1696                 else
1697                         ret = hns3_add_mc_addr(hw, addr);
1698                 if (ret) {
1699                         err = ret;
1700                         rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1701                                               addr);
1702                         hns3_dbg(hw, "%s mc mac addr: %s failed",
1703                                  del ? "Remove" : "Restore", mac_str);
1704                 }
1705         }
1706         return err;
1707 }
1708
1709 static int
1710 hns3_check_mq_mode(struct rte_eth_dev *dev)
1711 {
1712         enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1713         enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1714         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1715         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1716         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1717         struct rte_eth_dcb_tx_conf *dcb_tx_conf;
1718         uint8_t num_tc;
1719         int max_tc = 0;
1720         int i;
1721
1722         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1723         dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
1724
1725         if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1726                 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
1727                          "rx_mq_mode = %d", rx_mq_mode);
1728                 return -EINVAL;
1729         }
1730
1731         if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
1732             tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1733                 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
1734                          "is not supported. rx_mq_mode = %d, tx_mq_mode = %d",
1735                          rx_mq_mode, tx_mq_mode);
1736                 return -EINVAL;
1737         }
1738
1739         if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
1740                 if (dcb_rx_conf->nb_tcs > pf->tc_max) {
1741                         hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
1742                                  dcb_rx_conf->nb_tcs, pf->tc_max);
1743                         return -EINVAL;
1744                 }
1745
1746                 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
1747                       dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
1748                         hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
1749                                  "nb_tcs(%d) != %d or %d in rx direction.",
1750                                  dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
1751                         return -EINVAL;
1752                 }
1753
1754                 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
1755                         hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
1756                                  dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
1757                         return -EINVAL;
1758                 }
1759
1760                 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1761                         if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
1762                                 hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
1763                                          "is not equal to one in tx direction.",
1764                                          i, dcb_rx_conf->dcb_tc[i]);
1765                                 return -EINVAL;
1766                         }
1767                         if (dcb_rx_conf->dcb_tc[i] > max_tc)
1768                                 max_tc = dcb_rx_conf->dcb_tc[i];
1769                 }
1770
1771                 num_tc = max_tc + 1;
1772                 if (num_tc > dcb_rx_conf->nb_tcs) {
1773                         hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
1774                                  num_tc, dcb_rx_conf->nb_tcs);
1775                         return -EINVAL;
1776                 }
1777         }
1778
1779         return 0;
1780 }
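
/*
 * Example of a configuration accepted by the checks above (a sketch, not
 * taken from this file): rx_mq_mode = ETH_MQ_RX_DCB_RSS with
 * dcb_rx_conf->nb_tcs = dcb_tx_conf->nb_tcs = HNS3_4_TCS and identical
 * dcb_tc[] maps in both directions that only use TC indexes 0..3, e.g.
 * priorities {0,1,2,3,4,5,6,7} mapped to TCs {0,1,2,3,0,1,2,3}; the derived
 * num_tc (max_tc + 1 = 4) then does not exceed nb_tcs.
 */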
1781
1782 static int
1783 hns3_check_dcb_cfg(struct rte_eth_dev *dev)
1784 {
1785         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1786
1787         if (!hns3_dev_dcb_supported(hw)) {
1788                 hns3_err(hw, "this port does not support dcb configurations.");
1789                 return -EOPNOTSUPP;
1790         }
1791
1792         if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
1793                 hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
1794                 return -EOPNOTSUPP;
1795         }
1796
1797         /* Check multiple queue mode */
1798         return hns3_check_mq_mode(dev);
1799 }
1800
1801 static int
1802 hns3_dev_configure(struct rte_eth_dev *dev)
1803 {
1804         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1805         struct hns3_rss_conf *rss_cfg = &hw->rss_info;
1806         struct rte_eth_conf *conf = &dev->data->dev_conf;
1807         enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
1808         uint16_t nb_rx_q = dev->data->nb_rx_queues;
1809         uint16_t nb_tx_q = dev->data->nb_tx_queues;
1810         struct rte_eth_rss_conf rss_conf;
1811         uint16_t mtu;
1812         int ret;
1813
1814         /*
1815          * On hip08, hardware does not support configurations in which the
1816          * numbers of rx and tx queues are not equal.
1817          */
1818         if (nb_rx_q != nb_tx_q) {
1819                 hns3_err(hw,
1820                          "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
1821                          "Hardware does not support this configuration!",
1822                          nb_rx_q, nb_tx_q);
1823                 return -EINVAL;
1824         }
1825
1826         if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1827                 hns3_err(hw, "setting link speed/duplex not supported");
1828                 return -EINVAL;
1829         }
1830
1831         hw->adapter_state = HNS3_NIC_CONFIGURING;
1832         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1833                 ret = hns3_check_dcb_cfg(dev);
1834                 if (ret)
1835                         goto cfg_err;
1836         }
1837
1838         /* If RSS is enabled, update it; use the driver's default key when none is given */
1839         if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
1840                 rss_conf = conf->rx_adv_conf.rss_conf;
1841                 if (rss_conf.rss_key == NULL) {
1842                         rss_conf.rss_key = rss_cfg->key;
1843                         rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
1844                 }
1845
1846                 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
1847                 if (ret)
1848                         goto cfg_err;
1849         }
1850
1851         /*
1852          * If jumbo frames are enabled, MTU needs to be refreshed
1853          * according to the maximum RX packet length.
1854          */
1855         if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1856                 /*
1857                  * The validity of max_rx_pkt_len is guaranteed by the DPDK
1858                  * framework. Its maximum value is HNS3_MAX_FRAME_LEN, so it
1859                  * can safely be assigned to a "uint16_t" variable.
1860                  */
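                /*
                 * For example, assuming HNS3_ETH_OVERHEAD accounts for the
                 * Ethernet header, CRC and two VLAN tags (26 bytes), a
                 * max_rx_pkt_len of 1526 maps back to the standard 1500-byte
                 * MTU via HNS3_PKTLEN_TO_MTU().
                 */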
1861                 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
1862                 ret = hns3_dev_mtu_set(dev, mtu);
1863                 if (ret)
1864                         goto cfg_err;
1865                 dev->data->mtu = mtu;
1866         }
1867
1868         ret = hns3_dev_configure_vlan(dev);
1869         if (ret)
1870                 goto cfg_err;
1871
1872         hw->adapter_state = HNS3_NIC_CONFIGURED;
1873
1874         return 0;
1875
1876 cfg_err:
1877         hw->adapter_state = HNS3_NIC_INITIALIZED;
1878         return ret;
1879 }
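
/*
 * A minimal application-side sketch of driving hns3_dev_configure() (queue
 * counts and port_id are illustrative only):
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *     };
 *     int ret = rte_eth_dev_configure(port_id, 4, 4, &conf);
 *
 * Equal rx/tx queue counts are mandatory on hip08, and the RSS branch above
 * falls back to the key kept in hw->rss_info when the application does not
 * provide one.
 */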
1880
1881 static int
1882 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
1883 {
1884         struct hns3_config_max_frm_size_cmd *req;
1885         struct hns3_cmd_desc desc;
1886
1887         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
1888
1889         req = (struct hns3_config_max_frm_size_cmd *)desc.data;
1890         req->max_frm_size = rte_cpu_to_le_16(new_mps);
1891         req->min_frm_size = HNS3_MIN_FRAME_LEN;
1892
1893         return hns3_cmd_send(hw, &desc, 1);
1894 }
1895
1896 static int
1897 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
1898 {
1899         int ret;
1900
1901         ret = hns3_set_mac_mtu(hw, mps);
1902         if (ret) {
1903                 hns3_err(hw, "Failed to set mtu, ret = %d", ret);
1904                 return ret;
1905         }
1906
1907         ret = hns3_buffer_alloc(hw);
1908         if (ret) {
1909                 hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
1910                 return ret;
1911         }
1912
1913         return 0;
1914 }
1915
1916 static int
1917 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1918 {
1919         struct hns3_adapter *hns = dev->data->dev_private;
1920         uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
1921         struct hns3_hw *hw = &hns->hw;
1922         bool is_jumbo_frame;
1923         int ret;
1924
1925         if (dev->data->dev_started) {
1926                 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
1927                          "before configuration", dev->data->port_id);
1928                 return -EBUSY;
1929         }
1930
1931         rte_spinlock_lock(&hw->lock);
1932         is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN;
1933         frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
1934
1935         /*
1936          * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely
1937          * be assigned to a "uint16_t" variable.
1938          */
1939         ret = hns3_config_mtu(hw, (uint16_t)frame_size);
1940         if (ret) {
1941                 rte_spinlock_unlock(&hw->lock);
1942                 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
1943                          dev->data->port_id, mtu, ret);
1944                 return ret;
1945         }
1946         hns->pf.mps = (uint16_t)frame_size;
1947         if (is_jumbo_frame)
1948                 dev->data->dev_conf.rxmode.offloads |=
1949                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
1950         else
1951                 dev->data->dev_conf.rxmode.offloads &=
1952                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1953         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1954         rte_spinlock_unlock(&hw->lock);
1955
1956         return 0;
1957 }
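
/*
 * Since a started port is rejected with -EBUSY above, an application sketch
 * would stop the port before changing the MTU (values are illustrative):
 *
 *     rte_eth_dev_stop(port_id);
 *     int ret = rte_eth_dev_set_mtu(port_id, 9000);
 *
 * On success the jumbo-frame offload flag and max_rx_pkt_len in the stored
 * rxmode configuration are updated to match the new frame size.
 */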
1958
1959 static int
1960 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
1961 {
1962         struct hns3_adapter *hns = eth_dev->data->dev_private;
1963         struct hns3_hw *hw = &hns->hw;
1964
1965         info->max_rx_queues = hw->tqps_num;
1966         info->max_tx_queues = hw->tqps_num;
1967         info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
1968         info->min_rx_bufsize = hw->rx_buf_len;
1969         info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
1970         info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
1971         info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
1972                                  DEV_RX_OFFLOAD_TCP_CKSUM |
1973                                  DEV_RX_OFFLOAD_UDP_CKSUM |
1974                                  DEV_RX_OFFLOAD_SCTP_CKSUM |
1975                                  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1976                                  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
1977                                  DEV_RX_OFFLOAD_KEEP_CRC |
1978                                  DEV_RX_OFFLOAD_SCATTER |
1979                                  DEV_RX_OFFLOAD_VLAN_STRIP |
1980                                  DEV_RX_OFFLOAD_QINQ_STRIP |
1981                                  DEV_RX_OFFLOAD_VLAN_FILTER |
1982                                  DEV_RX_OFFLOAD_VLAN_EXTEND |
1983                                  DEV_RX_OFFLOAD_JUMBO_FRAME);
1984         info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1985         info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1986                                  DEV_TX_OFFLOAD_IPV4_CKSUM |
1987                                  DEV_TX_OFFLOAD_TCP_CKSUM |
1988                                  DEV_TX_OFFLOAD_UDP_CKSUM |
1989                                  DEV_TX_OFFLOAD_SCTP_CKSUM |
1990                                  DEV_TX_OFFLOAD_VLAN_INSERT |
1991                                  DEV_TX_OFFLOAD_QINQ_INSERT |
1992                                  DEV_TX_OFFLOAD_MULTI_SEGS |
1993                                  info->tx_queue_offload_capa);
1994
1995         info->rx_desc_lim = (struct rte_eth_desc_lim) {
1996                 .nb_max = HNS3_MAX_RING_DESC,
1997                 .nb_min = HNS3_MIN_RING_DESC,
1998                 .nb_align = HNS3_ALIGN_RING_DESC,
1999         };
2000
2001         info->tx_desc_lim = (struct rte_eth_desc_lim) {
2002                 .nb_max = HNS3_MAX_RING_DESC,
2003                 .nb_min = HNS3_MIN_RING_DESC,
2004                 .nb_align = HNS3_ALIGN_RING_DESC,
2005         };
2006
2007         info->vmdq_queue_num = 0;
2008
2009         info->reta_size = HNS3_RSS_IND_TBL_SIZE;
2010         info->hash_key_size = HNS3_RSS_KEY_SIZE;
2011         info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
2012
2013         info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2014         info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2015         info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2016         info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2017         info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2018         info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2019
2020         return 0;
2021 }
2022
2023 static int
2024 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2025                     size_t fw_size)
2026 {
2027         struct hns3_adapter *hns = eth_dev->data->dev_private;
2028         struct hns3_hw *hw = &hns->hw;
2029         int ret;
2030
2031         ret = snprintf(fw_version, fw_size, "0x%08x", hw->fw_version);
2032         ret += 1; /* add the size of '\0' */
2033         if (fw_size < (uint32_t)ret)
2034                 return ret;
2035         else
2036                 return 0;
2037 }
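
/*
 * The return convention mirrors rte_eth_dev_fw_version_get(): 0 on success,
 * or the required buffer length (including the terminating '\0') when
 * fw_size is too small, so a caller may retry with a buffer sized from the
 * returned value.
 */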
2038
2039 static int
2040 hns3_dev_link_update(struct rte_eth_dev *eth_dev,
2041                      __rte_unused int wait_to_complete)
2042 {
2043         struct hns3_adapter *hns = eth_dev->data->dev_private;
2044         struct hns3_hw *hw = &hns->hw;
2045         struct hns3_mac *mac = &hw->mac;
2046         struct rte_eth_link new_link;
2047
2048         memset(&new_link, 0, sizeof(new_link));
2049         switch (mac->link_speed) {
2050         case ETH_SPEED_NUM_10M:
2051         case ETH_SPEED_NUM_100M:
2052         case ETH_SPEED_NUM_1G:
2053         case ETH_SPEED_NUM_10G:
2054         case ETH_SPEED_NUM_25G:
2055         case ETH_SPEED_NUM_40G:
2056         case ETH_SPEED_NUM_50G:
2057         case ETH_SPEED_NUM_100G:
2058                 new_link.link_speed = mac->link_speed;
2059                 break;
2060         default:
2061                 new_link.link_speed = ETH_SPEED_NUM_100M;
2062                 break;
2063         }
2064
2065         new_link.link_duplex = mac->link_duplex;
2066         new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
2067         new_link.link_autoneg =
2068             !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
2069
2070         return rte_eth_linkstatus_set(eth_dev, &new_link);
2071 }
2072
2073 static int
2074 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2075 {
2076         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2077         struct hns3_pf *pf = &hns->pf;
2078
2079         if (!(status->pf_state & HNS3_PF_STATE_DONE))
2080                 return -EINVAL;
2081
2082         pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2083
2084         return 0;
2085 }
2086
2087 static int
2088 hns3_query_function_status(struct hns3_hw *hw)
2089 {
2090 #define HNS3_QUERY_MAX_CNT              10
2091 #define HNS3_QUERY_SLEEP_MSECOND        1
2092         struct hns3_func_status_cmd *req;
2093         struct hns3_cmd_desc desc;
2094         int timeout = 0;
2095         int ret;
2096
2097         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2098         req = (struct hns3_func_status_cmd *)desc.data;
2099
2100         do {
2101                 ret = hns3_cmd_send(hw, &desc, 1);
2102                 if (ret) {
2103                         PMD_INIT_LOG(ERR, "query function status failed %d",
2104                                      ret);
2105                         return ret;
2106                 }
2107
2108                 /* Check whether pf reset is done */
2109                 if (req->pf_state)
2110                         break;
2111
2112                 rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND);
2113         } while (timeout++ < HNS3_QUERY_MAX_CNT);
2114
2115         return hns3_parse_func_status(hw, req);
2116 }
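
/*
 * The polling loop above re-sends the query with a 1 ms delay between
 * attempts and gives up after HNS3_QUERY_MAX_CNT extra tries, i.e. it waits
 * roughly 10 ms for pf_state to become non-zero before parsing whatever
 * status was last reported.
 */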
2117
2118 static int
2119 hns3_query_pf_resource(struct hns3_hw *hw)
2120 {
2121         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2122         struct hns3_pf *pf = &hns->pf;
2123         struct hns3_pf_res_cmd *req;
2124         struct hns3_cmd_desc desc;
2125         int ret;
2126
2127         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2128         ret = hns3_cmd_send(hw, &desc, 1);
2129         if (ret) {
2130                 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2131                 return ret;
2132         }
2133
2134         req = (struct hns3_pf_res_cmd *)desc.data;
2135         hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
2136         pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
2137         hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
2138
2139         if (req->tx_buf_size)
2140                 pf->tx_buf_size =
2141                     rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2142         else
2143                 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2144
2145         pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2146
2147         if (req->dv_buf_size)
2148                 pf->dv_buf_size =
2149                     rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2150         else
2151                 pf->dv_buf_size = HNS3_DEFAULT_DV;
2152
2153         pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2154
2155         hw->num_msi =
2156             hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
2157                            HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
2158
2159         return 0;
2160 }
2161
2162 static void
2163 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
2164 {
2165         struct hns3_cfg_param_cmd *req;
2166         uint64_t mac_addr_tmp_high;
2167         uint64_t mac_addr_tmp;
2168         uint32_t i;
2169
2170         req = (struct hns3_cfg_param_cmd *)desc[0].data;
2171
2172         /* get the configuration */
2173         cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2174                                              HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);
2175         cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2176                                      HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
2177         cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2178                                            HNS3_CFG_TQP_DESC_N_M,
2179                                            HNS3_CFG_TQP_DESC_N_S);
2180
2181         cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2182                                        HNS3_CFG_PHY_ADDR_M,
2183                                        HNS3_CFG_PHY_ADDR_S);
2184         cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2185                                          HNS3_CFG_MEDIA_TP_M,
2186                                          HNS3_CFG_MEDIA_TP_S);
2187         cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2188                                          HNS3_CFG_RX_BUF_LEN_M,
2189                                          HNS3_CFG_RX_BUF_LEN_S);
2190         /* get mac address */
2191         mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
2192         mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2193                                            HNS3_CFG_MAC_ADDR_H_M,
2194                                            HNS3_CFG_MAC_ADDR_H_S);
2195
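        /*
         * The MAC address is split across two config words: param[2] carries
         * the low 32 bits and the HNS3_CFG_MAC_ADDR_H field of param[3] the
         * upper bits. "(<< 31) << 1" below is just a 32-bit left shift
         * written in two steps, so the high part lands above the low word
         * before the bytes are extracted into cfg->mac_addr[].
         */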
2196         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
2197
2198         cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2199                                             HNS3_CFG_DEFAULT_SPEED_M,
2200                                             HNS3_CFG_DEFAULT_SPEED_S);
2201         cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2202                                            HNS3_CFG_RSS_SIZE_M,
2203                                            HNS3_CFG_RSS_SIZE_S);
2204
2205         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
2206                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
2207
2208         req = (struct hns3_cfg_param_cmd *)desc[1].data;
2209         cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
2210
2211         cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2212                                             HNS3_CFG_SPEED_ABILITY_M,
2213                                             HNS3_CFG_SPEED_ABILITY_S);
2214         cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2215                                         HNS3_CFG_UMV_TBL_SPACE_M,
2216                                         HNS3_CFG_UMV_TBL_SPACE_S);
2217         if (!cfg->umv_space)
2218                 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
2219 }
2220
2221 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash
2222  * @hw: pointer to struct hns3_hw
2223  * @hcfg: the config structure to be filled
2224  */
2225 static int
2226 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2227 {
2228         struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2229         struct hns3_cfg_param_cmd *req;
2230         uint32_t offset;
2231         uint32_t i;
2232         int ret;
2233
2234         for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2235                 offset = 0;
2236                 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2237                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2238                                           true);
2239                 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2240                                i * HNS3_CFG_RD_LEN_BYTES);
2241                 /* Len should be divided by 4 when sent to hardware */
2242                 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2243                                HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2244                 req->offset = rte_cpu_to_le_32(offset);
2245         }
2246
2247         ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2248         if (ret) {
2249                 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2250                 return ret;
2251         }
2252
2253         hns3_parse_cfg(hcfg, desc);
2254
2255         return 0;
2256 }
2257
2258 static int
2259 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2260 {
2261         switch (speed_cmd) {
2262         case HNS3_CFG_SPEED_10M:
2263                 *speed = ETH_SPEED_NUM_10M;
2264                 break;
2265         case HNS3_CFG_SPEED_100M:
2266                 *speed = ETH_SPEED_NUM_100M;
2267                 break;
2268         case HNS3_CFG_SPEED_1G:
2269                 *speed = ETH_SPEED_NUM_1G;
2270                 break;
2271         case HNS3_CFG_SPEED_10G:
2272                 *speed = ETH_SPEED_NUM_10G;
2273                 break;
2274         case HNS3_CFG_SPEED_25G:
2275                 *speed = ETH_SPEED_NUM_25G;
2276                 break;
2277         case HNS3_CFG_SPEED_40G:
2278                 *speed = ETH_SPEED_NUM_40G;
2279                 break;
2280         case HNS3_CFG_SPEED_50G:
2281                 *speed = ETH_SPEED_NUM_50G;
2282                 break;
2283         case HNS3_CFG_SPEED_100G:
2284                 *speed = ETH_SPEED_NUM_100G;
2285                 break;
2286         default:
2287                 return -EINVAL;
2288         }
2289
2290         return 0;
2291 }
2292
2293 static int
2294 hns3_get_board_configuration(struct hns3_hw *hw)
2295 {
2296         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2297         struct hns3_pf *pf = &hns->pf;
2298         struct hns3_cfg cfg;
2299         int ret;
2300
2301         ret = hns3_get_board_cfg(hw, &cfg);
2302         if (ret) {
2303                 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
2304                 return ret;
2305         }
2306
2307         if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) {
2308                 PMD_INIT_LOG(ERR, "media type is copper, not supported.");
2309                 return -EOPNOTSUPP;
2310         }
2311
2312         hw->mac.media_type = cfg.media_type;
2313         hw->rss_size_max = cfg.rss_size_max;
2314         hw->rx_buf_len = cfg.rx_buf_len;
2315         memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
2316         hw->mac.phy_addr = cfg.phy_addr;
2317         hw->mac.default_addr_setted = false;
2318         hw->num_tx_desc = cfg.tqp_desc_num;
2319         hw->num_rx_desc = cfg.tqp_desc_num;
2320         hw->dcb_info.num_pg = 1;
2321         hw->dcb_info.hw_pfc_map = 0;
2322
2323         ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
2324         if (ret) {
2325                 PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
2326                              cfg.default_speed, ret);
2327                 return ret;
2328         }
2329
2330         pf->tc_max = cfg.tc_num;
2331         if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
2332                 PMD_INIT_LOG(WARNING,
2333                              "Get TC num(%u) from flash, set TC num to 1",
2334                              pf->tc_max);
2335                 pf->tc_max = 1;
2336         }
2337
2338         /* Dev does not support DCB */
2339         if (!hns3_dev_dcb_supported(hw)) {
2340                 pf->tc_max = 1;
2341                 pf->pfc_max = 0;
2342         } else
2343                 pf->pfc_max = pf->tc_max;
2344
2345         hw->dcb_info.num_tc = 1;
2346         hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
2347                                      hw->tqps_num / hw->dcb_info.num_tc);
2348         hns3_set_bit(hw->hw_tc_map, 0, 1);
2349         pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
2350
2351         pf->wanted_umv_size = cfg.umv_space;
2352
2353         return ret;
2354 }
2355
2356 static int
2357 hns3_get_configuration(struct hns3_hw *hw)
2358 {
2359         int ret;
2360
2361         ret = hns3_query_function_status(hw);
2362         if (ret) {
2363                 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
2364                 return ret;
2365         }
2366
2367         /* Get pf resource */
2368         ret = hns3_query_pf_resource(hw);
2369         if (ret) {
2370                 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
2371                 return ret;
2372         }
2373
2374         ret = hns3_get_board_configuration(hw);
2375         if (ret) {
2376                 PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
2377                 return ret;
2378         }
2379
2380         return 0;
2381 }
2382
2383 static int
2384 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
2385                       uint16_t tqp_vid, bool is_pf)
2386 {
2387         struct hns3_tqp_map_cmd *req;
2388         struct hns3_cmd_desc desc;
2389         int ret;
2390
2391         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
2392
2393         req = (struct hns3_tqp_map_cmd *)desc.data;
2394         req->tqp_id = rte_cpu_to_le_16(tqp_pid);
2395         req->tqp_vf = func_id;
2396         req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
2397         if (!is_pf)
2398                 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
2399         req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
2400
2401         ret = hns3_cmd_send(hw, &desc, 1);
2402         if (ret)
2403                 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
2404
2405         return ret;
2406 }
2407
2408 static int
2409 hns3_map_tqp(struct hns3_hw *hw)
2410 {
2411         uint16_t tqps_num = hw->total_tqps_num;
2412         uint16_t func_id;
2413         uint16_t tqp_id;
2414         int num;
2415         int ret;
2416         int i;
2417
2418         /*
2419          * In the current version, VF is not supported when the PF is driven
2420          * by the DPDK driver, so we allocate as many tqps as possible to the PF.
2421          */
2422         tqp_id = 0;
2423         num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
2424         for (func_id = 0; func_id < num; func_id++) {
2425                 for (i = 0;
2426                      i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
2427                         ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
2428                                                     true);
2429                         if (ret)
2430                                 return ret;
2431                 }
2432         }
2433
2434         return 0;
2435 }
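
/*
 * Worked example of the mapping above: with total_tqps_num = 16 and
 * HNS3_MAX_TQP_NUM_PER_FUNC of at least 16, DIV_ROUND_UP yields num = 1, so
 * a single pass maps physical TQPs 0..15 to func_id 0 with virtual ids
 * 0..15; further func_ids would only be used if the total exceeded the
 * per-function limit.
 */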
2436
2437 static int
2438 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
2439 {
2440         struct hns3_config_mac_speed_dup_cmd *req;
2441         struct hns3_cmd_desc desc;
2442         int ret;
2443
2444         req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
2445
2446         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
2447
2448         hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex);
2449
2450         switch (speed) {
2451         case ETH_SPEED_NUM_10M:
2452                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2453                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
2454                 break;
2455         case ETH_SPEED_NUM_100M:
2456                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2457                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
2458                 break;
2459         case ETH_SPEED_NUM_1G:
2460                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2461                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
2462                 break;
2463         case ETH_SPEED_NUM_10G:
2464                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2465                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
2466                 break;
2467         case ETH_SPEED_NUM_25G:
2468                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2469                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
2470                 break;
2471         case ETH_SPEED_NUM_40G:
2472                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2473                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
2474                 break;
2475         case ETH_SPEED_NUM_50G:
2476                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2477                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
2478                 break;
2479         case ETH_SPEED_NUM_100G:
2480                 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2481                                HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
2482                 break;
2483         default:
2484                 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
2485                 return -EINVAL;
2486         }
2487
2488         hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
2489
2490         ret = hns3_cmd_send(hw, &desc, 1);
2491         if (ret)
2492                 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
2493
2494         return ret;
2495 }
2496
2497 static int
2498 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2499 {
2500         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2501         struct hns3_pf *pf = &hns->pf;
2502         struct hns3_priv_buf *priv;
2503         uint32_t i, total_size;
2504
2505         total_size = pf->pkt_buf_size;
2506
2507         /* Allocate tx buffer for all enabled TCs */
2508         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2509                 priv = &buf_alloc->priv_buf[i];
2510
2511                 if (hw->hw_tc_map & BIT(i)) {
2512                         if (total_size < pf->tx_buf_size)
2513                                 return -ENOMEM;
2514
2515                         priv->tx_buf_size = pf->tx_buf_size;
2516                 } else
2517                         priv->tx_buf_size = 0;
2518
2519                 total_size -= priv->tx_buf_size;
2520         }
2521
2522         return 0;
2523 }
2524
2525 static int
2526 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2527 {
2528 /* TX buffer size is configured in units of 128 bytes */
2529 #define HNS3_BUF_SIZE_UNIT_SHIFT        7
2530 #define HNS3_BUF_SIZE_UPDATE_EN_MSK     BIT(15)
2531         struct hns3_tx_buff_alloc_cmd *req;
2532         struct hns3_cmd_desc desc;
2533         uint32_t buf_size;
2534         uint32_t i;
2535         int ret;
2536
2537         req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
2538
2539         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
2540         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2541                 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
2542
2543                 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
2544                 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
2545                                                 HNS3_BUF_SIZE_UPDATE_EN_MSK);
2546         }
2547
2548         ret = hns3_cmd_send(hw, &desc, 1);
2549         if (ret)
2550                 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
2551
2552         return ret;
2553 }
2554
2555 static int
2556 hns3_get_tc_num(struct hns3_hw *hw)
2557 {
2558         int cnt = 0;
2559         uint8_t i;
2560
2561         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
2562                 if (hw->hw_tc_map & BIT(i))
2563                         cnt++;
2564         return cnt;
2565 }
2566
2567 static uint32_t
2568 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
2569 {
2570         struct hns3_priv_buf *priv;
2571         uint32_t rx_priv = 0;
2572         int i;
2573
2574         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2575                 priv = &buf_alloc->priv_buf[i];
2576                 if (priv->enable)
2577                         rx_priv += priv->buf_size;
2578         }
2579         return rx_priv;
2580 }
2581
2582 static uint32_t
2583 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
2584 {
2585         uint32_t total_tx_size = 0;
2586         uint32_t i;
2587
2588         for (i = 0; i < HNS3_MAX_TC_NUM; i++)
2589                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2590
2591         return total_tx_size;
2592 }
2593
2594 /* Get the number of pfc-enabled TCs that have a private buffer */
2595 static int
2596 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2597 {
2598         struct hns3_priv_buf *priv;
2599         int cnt = 0;
2600         uint8_t i;
2601
2602         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2603                 priv = &buf_alloc->priv_buf[i];
2604                 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
2605                         cnt++;
2606         }
2607
2608         return cnt;
2609 }
2610
2611 /* Get the number of pfc-disabled TCs that have a private buffer */
2612 static int
2613 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
2614                          struct hns3_pkt_buf_alloc *buf_alloc)
2615 {
2616         struct hns3_priv_buf *priv;
2617         int cnt = 0;
2618         uint8_t i;
2619
2620         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2621                 priv = &buf_alloc->priv_buf[i];
2622                 if (hw->hw_tc_map & BIT(i) &&
2623                     !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
2624                         cnt++;
2625         }
2626
2627         return cnt;
2628 }
2629
2630 static bool
2631 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
2632                   uint32_t rx_all)
2633 {
2634         uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2635         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2636         struct hns3_pf *pf = &hns->pf;
2637         uint32_t shared_buf, aligned_mps;
2638         uint32_t rx_priv;
2639         uint8_t tc_num;
2640         uint8_t i;
2641
2642         tc_num = hns3_get_tc_num(hw);
2643         aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
2644
2645         if (hns3_dev_dcb_supported(hw))
2646                 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
2647                                         pf->dv_buf_size;
2648         else
2649                 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
2650                                         + pf->dv_buf_size;
2651
2652         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2653         shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc),
2654                              HNS3_BUF_SIZE_UNIT);
2655
2656         rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
2657         if (rx_all < rx_priv + shared_std)
2658                 return false;
2659
2660         shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
2661         buf_alloc->s_buf.buf_size = shared_buf;
2662         if (hns3_dev_dcb_supported(hw)) {
2663                 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
2664                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2665                         - roundup(aligned_mps / HNS3_BUF_DIV_BY,
2666                                   HNS3_BUF_SIZE_UNIT);
2667         } else {
2668                 buf_alloc->s_buf.self.high =
2669                         aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
2670                 buf_alloc->s_buf.self.low = aligned_mps;
2671         }
2672
2673         if (hns3_dev_dcb_supported(hw)) {
2674                 hi_thrd = shared_buf - pf->dv_buf_size;
2675
2676                 if (tc_num <= NEED_RESERVE_TC_NUM)
2677                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2678                                         / BUF_MAX_PERCENT;
2679
2680                 if (tc_num)
2681                         hi_thrd = hi_thrd / tc_num;
2682
2683                 hi_thrd = max_t(uint32_t, hi_thrd,
2684                                 HNS3_BUF_MUL_BY * aligned_mps);
2685                 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
2686                 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
2687         } else {
2688                 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
2689                 lo_thrd = aligned_mps;
2690         }
2691
2692         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2693                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2694                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2695         }
2696
2697         return true;
2698 }
2699
2700 static bool
2701 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
2702                      struct hns3_pkt_buf_alloc *buf_alloc)
2703 {
2704         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2705         struct hns3_pf *pf = &hns->pf;
2706         struct hns3_priv_buf *priv;
2707         uint32_t aligned_mps;
2708         uint32_t rx_all;
2709         uint8_t i;
2710
2711         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2712         aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
2713
2714         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2715                 priv = &buf_alloc->priv_buf[i];
2716
2717                 priv->enable = 0;
2718                 priv->wl.low = 0;
2719                 priv->wl.high = 0;
2720                 priv->buf_size = 0;
2721
2722                 if (!(hw->hw_tc_map & BIT(i)))
2723                         continue;
2724
2725                 priv->enable = 1;
2726                 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
2727                         priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
2728                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2729                                                 HNS3_BUF_SIZE_UNIT);
2730                 } else {
2731                         priv->wl.low = 0;
2732                         priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
2733                                         aligned_mps;
2734                 }
2735
2736                 priv->buf_size = priv->wl.high + pf->dv_buf_size;
2737         }
2738
2739         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
2740 }
2741
2742 static bool
2743 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
2744                              struct hns3_pkt_buf_alloc *buf_alloc)
2745 {
2746         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2747         struct hns3_pf *pf = &hns->pf;
2748         struct hns3_priv_buf *priv;
2749         int no_pfc_priv_num;
2750         uint32_t rx_all;
2751         uint8_t mask;
2752         int i;
2753
2754         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2755         no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
2756
2757         /* clear TCs starting from the last one */
2758         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
2759                 priv = &buf_alloc->priv_buf[i];
2760                 mask = BIT((uint8_t)i);
2761
2762                 if (hw->hw_tc_map & mask &&
2763                     !(hw->dcb_info.hw_pfc_map & mask)) {
2764                         /* Clear the private buffer of the pfc-disabled TC */
2765                         priv->wl.low = 0;
2766                         priv->wl.high = 0;
2767                         priv->buf_size = 0;
2768                         priv->enable = 0;
2769                         no_pfc_priv_num--;
2770                 }
2771
2772                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
2773                     no_pfc_priv_num == 0)
2774                         break;
2775         }
2776
2777         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
2778 }
2779
2780 static bool
2781 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
2782                            struct hns3_pkt_buf_alloc *buf_alloc)
2783 {
2784         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2785         struct hns3_pf *pf = &hns->pf;
2786         struct hns3_priv_buf *priv;
2787         uint32_t rx_all;
2788         int pfc_priv_num;
2789         uint8_t mask;
2790         int i;
2791
2792         rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2793         pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
2794
2795         /* clear TCs starting from the last one */
2796         for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
2797                 priv = &buf_alloc->priv_buf[i];
2798                 mask = BIT((uint8_t)i);
2799
2800                 if (hw->hw_tc_map & mask &&
2801                     hw->dcb_info.hw_pfc_map & mask) {
2802                         /* Reduce the number of pfc TCs with a private buffer */
2803                         priv->wl.low = 0;
2804                         priv->enable = 0;
2805                         priv->wl.high = 0;
2806                         priv->buf_size = 0;
2807                         pfc_priv_num--;
2808                 }
2809                 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
2810                     pfc_priv_num == 0)
2811                         break;
2812         }
2813
2814         return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
2815 }
2816
2817 static bool
2818 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
2819                           struct hns3_pkt_buf_alloc *buf_alloc)
2820 {
2821 #define COMPENSATE_BUFFER       0x3C00
2822 #define COMPENSATE_HALF_MPS_NUM 5
2823 #define PRIV_WL_GAP             0x1800
2824         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2825         struct hns3_pf *pf = &hns->pf;
2826         uint32_t tc_num = hns3_get_tc_num(hw);
2827         uint32_t half_mps = pf->mps >> 1;
2828         struct hns3_priv_buf *priv;
2829         uint32_t min_rx_priv;
2830         uint32_t rx_priv;
2831         uint8_t i;
2832
2833         rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2834         if (tc_num)
2835                 rx_priv = rx_priv / tc_num;
2836
2837         if (tc_num <= NEED_RESERVE_TC_NUM)
2838                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2839
2840         /*
2841          * Minimum value of private buffer in rx direction (min_rx_priv) is
2842          * equal to "DV + 2.5 * MPS + 15KB". The driver only allocates the rx
2843          * private buffer if rx_priv is not less than min_rx_priv.
2844          */
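        /*
         * Numerically, COMPENSATE_BUFFER is 0x3C00 = 15360 bytes (15KB) and
         * COMPENSATE_HALF_MPS_NUM * half_mps is 5 * (MPS / 2) = 2.5 * MPS,
         * so the sum below is exactly "DV + 2.5 * MPS + 15KB".
         */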
2845         min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
2846                         COMPENSATE_HALF_MPS_NUM * half_mps;
2847         min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
2848         rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
2849
2850         if (rx_priv < min_rx_priv)
2851                 return false;
2852
2853         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2854                 priv = &buf_alloc->priv_buf[i];
2855
2856                 priv->enable = 0;
2857                 priv->wl.low = 0;
2858                 priv->wl.high = 0;
2859                 priv->buf_size = 0;
2860
2861                 if (!(hw->hw_tc_map & BIT(i)))
2862                         continue;
2863
2864                 priv->enable = 1;
2865                 priv->buf_size = rx_priv;
2866                 priv->wl.high = rx_priv - pf->dv_buf_size;
2867                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2868         }
2869
2870         buf_alloc->s_buf.buf_size = 0;
2871
2872         return true;
2873 }
2874
2875 /*
2876  * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
2877  * @hw: pointer to struct hns3_hw
2878  * @buf_alloc: pointer to buffer calculation data
2879  * @return: 0: calculation successful, negative: fail
2880  */
2881 static int
2882 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2883 {
2884         /* When DCB is not supported, rx private buffer is not allocated. */
2885         if (!hns3_dev_dcb_supported(hw)) {
2886                 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2887                 struct hns3_pf *pf = &hns->pf;
2888                 uint32_t rx_all = pf->pkt_buf_size;
2889
2890                 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
2891                 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
2892                         return -ENOMEM;
2893
2894                 return 0;
2895         }
2896
2897         /*
2898          * Try to allocate private packet buffers for all TCs without a
2899          * shared buffer.
2900          */
2901         if (hns3_only_alloc_priv_buff(hw, buf_alloc))
2902                 return 0;
2903
2904         /*
2905          * Try to allocate private packet buffers for all TCs with a shared
2906          * buffer.
2907          */
2908         if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
2909                 return 0;
2910
2911         /*
2912          * The number of enabled ports, TCs and no_drop TCs differs between
2913          * application scenarios. To obtain better performance, software can
2914          * allocate the buffer sizes and configure the waterlines by trying to
2915          * decrease the private buffer size in the following order: the
2916          * waterline of valid TCs first, then PFC-disabled TCs, then
2917          * PFC-enabled TCs.
2918          */
2919         if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
2920                 return 0;
2921
2922         if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
2923                 return 0;
2924
2925         if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
2926                 return 0;
2927
2928         return -ENOMEM;
2929 }
2930
2931 static int
2932 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2933 {
2934         struct hns3_rx_priv_buff_cmd *req;
2935         struct hns3_cmd_desc desc;
2936         uint32_t buf_size;
2937         int ret;
2938         int i;
2939
2940         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
2941         req = (struct hns3_rx_priv_buff_cmd *)desc.data;
2942
2943         /* Alloc private buffer TCs */
2944         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2945                 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
2946
2947                 req->buf_num[i] =
2948                         rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
2949                 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
2950         }
2951
2952         buf_size = buf_alloc->s_buf.buf_size;
2953         req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
2954                                            (1 << HNS3_TC0_PRI_BUF_EN_B));
2955
2956         ret = hns3_cmd_send(hw, &desc, 1);
2957         if (ret)
2958                 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
2959
2960         return ret;
2961 }
2962
2963 static int
2964 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2965 {
2966 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
2967         struct hns3_rx_priv_wl_buf *req;
2968         struct hns3_priv_buf *priv;
2969         struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
2970         int i, j;
2971         int ret;
2972
2973         for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
2974                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
2975                                           false);
2976                 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
2977
2978                 /* Only the first descriptor sets the NEXT bit to 1 */
2979                 if (i == 0)
2980                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2981                 else
2982                         desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2983
2984                 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
2985                         uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
2986
2987                         priv = &buf_alloc->priv_buf[idx];
2988                         req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
2989                                                         HNS3_BUF_UNIT_S);
2990                         req->tc_wl[j].high |=
2991                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
2992                         req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
2993                                                         HNS3_BUF_UNIT_S);
2994                         req->tc_wl[j].low |=
2995                                 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
2996                 }
2997         }
2998
2999         /* Send 2 descriptors at one time */
3000         ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
3001         if (ret)
3002                 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
3003                              ret);
3004         return ret;
3005 }
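/*
 * Note (editor's addition): hns3_rx_priv_wl_config() above and
 * hns3_common_thrd_config() below use the same two-descriptor layout: the
 * per-TC fields are split across the descriptors with HNS3_TC_NUM_ONE_DESC
 * entries each, and every descriptor except the last one sets
 * HNS3_CMD_FLAG_NEXT so that firmware handles them as one chained command
 * submitted by a single hns3_cmd_send() call.
 */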
3006
3007 static int
3008 hns3_common_thrd_config(struct hns3_hw *hw,
3009                         struct hns3_pkt_buf_alloc *buf_alloc)
3010 {
3011 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3012         struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3013         struct hns3_rx_com_thrd *req;
3014         struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3015         struct hns3_tc_thrd *tc;
3016         int tc_idx;
3017         int i, j;
3018         int ret;
3019
3020         for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3021                 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3022                                           false);
3023                 req = (struct hns3_rx_com_thrd *)&desc[i].data;
3024
3025                 /* Only the first descriptor sets the NEXT bit to 1 */
3026                 if (i == 0)
3027                         desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3028                 else
3029                         desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3030
3031                 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3032                         tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3033                         tc = &s_buf->tc_thrd[tc_idx];
3034
3035                         req->com_thrd[j].high =
3036                                 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3037                         req->com_thrd[j].high |=
3038                                  rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3039                         req->com_thrd[j].low =
3040                                 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3041                         req->com_thrd[j].low |=
3042                                  rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3043                 }
3044         }
3045
3046         /* Send 2 descriptors at one time */
3047         ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3048         if (ret)
3049                 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3050
3051         return ret;
3052 }
3053
3054 static int
3055 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3056 {
3057         struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3058         struct hns3_rx_com_wl *req;
3059         struct hns3_cmd_desc desc;
3060         int ret;
3061
3062         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3063
3064         req = (struct hns3_rx_com_wl *)desc.data;
3065         req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3066         req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3067
3068         req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3069         req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3070
3071         ret = hns3_cmd_send(hw, &desc, 1);
3072         if (ret)
3073                 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3074
3075         return ret;
3076 }
3077
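/*
 * hns3_buffer_alloc: calculate and configure the whole packet buffer layout
 * @hw: pointer to struct hns3_hw
 * @return: 0: configure successful, negative: fail
 *
 * Tx buffer sizes are calculated and programmed first, then the rx private
 * buffers. On DCB-capable hardware the per-TC rx waterlines and shared
 * buffer thresholds are also configured, and finally the common shared
 * buffer waterline is written.
 */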
3078 int
3079 hns3_buffer_alloc(struct hns3_hw *hw)
3080 {
3081         struct hns3_pkt_buf_alloc pkt_buf;
3082         int ret;
3083
3084         memset(&pkt_buf, 0, sizeof(pkt_buf));
3085         ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3086         if (ret) {
3087                 PMD_INIT_LOG(ERR,
3088                              "could not calc tx buffer size for all TCs %d",
3089                              ret);
3090                 return ret;
3091         }
3092
3093         ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3094         if (ret) {
3095                 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3096                 return ret;
3097         }
3098
3099         ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3100         if (ret) {
3101                 PMD_INIT_LOG(ERR,
3102                              "could not calc rx priv buffer size for all TCs %d",
3103                              ret);
3104                 return ret;
3105         }
3106
3107         ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3108         if (ret) {
3109                 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3110                 return ret;
3111         }
3112
3113         if (hns3_dev_dcb_supported(hw)) {
3114                 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3115                 if (ret) {
3116                         PMD_INIT_LOG(ERR,
3117                                      "could not configure rx private waterline %d",
3118                                      ret);
3119                         return ret;
3120                 }
3121
3122                 ret = hns3_common_thrd_config(hw, &pkt_buf);
3123                 if (ret) {
3124                         PMD_INIT_LOG(ERR,
3125                                      "could not configure common threshold %d",
3126                                      ret);
3127                         return ret;
3128                 }
3129         }
3130
3131         ret = hns3_common_wl_config(hw, &pkt_buf);
3132         if (ret)
3133                 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3134                              ret);
3135
3136         return ret;
3137 }
3138
3139 static int
3140 hns3_mac_init(struct hns3_hw *hw)
3141 {
3142         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3143         struct hns3_mac *mac = &hw->mac;
3144         struct hns3_pf *pf = &hns->pf;
3145         int ret;
3146
3147         pf->support_sfp_query = true;
3148         mac->link_duplex = ETH_LINK_FULL_DUPLEX;
3149         ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3150         if (ret) {
3151                 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3152                 return ret;
3153         }
3154
3155         mac->link_status = ETH_LINK_DOWN;
3156
3157         return hns3_config_mtu(hw, pf->mps);
3158 }
3159
3160 static int
3161 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3162 {
3163 #define HNS3_ETHERTYPE_SUCCESS_ADD              0
3164 #define HNS3_ETHERTYPE_ALREADY_ADD              1
3165 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW         2
3166 #define HNS3_ETHERTYPE_KEY_CONFLICT             3
3167         int return_status;
3168
3169         if (cmdq_resp) {
3170                 PMD_INIT_LOG(ERR,
3171                              "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.",
3172                              cmdq_resp);
3173                 return -EIO;
3174         }
3175
3176         switch (resp_code) {
3177         case HNS3_ETHERTYPE_SUCCESS_ADD:
3178         case HNS3_ETHERTYPE_ALREADY_ADD:
3179                 return_status = 0;
3180                 break;
3181         case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3182                 PMD_INIT_LOG(ERR,
3183                              "add mac ethertype failed for manager table overflow.");
3184                 return_status = -EIO;
3185                 break;
3186         case HNS3_ETHERTYPE_KEY_CONFLICT:
3187                 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3188                 return_status = -EIO;
3189                 break;
3190         default:
3191                 PMD_INIT_LOG(ERR,
3192                              "add mac ethertype failed for undefined reason, code=%d.",
3193                              resp_code);
3194                 return_status = -EIO;
3195         }
3196
3197         return return_status;
3198 }
3199
3200 static int
3201 hns3_add_mgr_tbl(struct hns3_hw *hw,
3202                  const struct hns3_mac_mgr_tbl_entry_cmd *req)
3203 {
3204         struct hns3_cmd_desc desc;
3205         uint8_t resp_code;
3206         uint16_t retval;
3207         int ret;
3208
3209         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3210         memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3211
3212         ret = hns3_cmd_send(hw, &desc, 1);
3213         if (ret) {
3214                 PMD_INIT_LOG(ERR,
3215                              "add mac ethertype failed for cmd_send, ret =%d.",
3216                              ret);
3217                 return ret;
3218         }
3219
3220         resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3221         retval = rte_le_to_cpu_16(desc.retval);
3222
3223         return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3224 }
3225
3226 static void
3227 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3228                      int *table_item_num)
3229 {
3230         struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3231
3232         /*
3233          * In the current version, one entry is added to the management table:
3234          * 0x0180C200000E -- the LLDP multicast MAC address
3235          */
3236         tbl = mgr_table;
3237         tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3238         tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3239         tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3240         tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
3241         tbl->i_port_bitmap = 0x1;
3242         *table_item_num = 1;
3243 }
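/*
 * Note (editor's addition): the 48-bit LLDP multicast address
 * 01:80:c2:00:00:0e is split into a 32-bit high part (0x0180C200) and a
 * 16-bit low part (0x000E); htonl()/htons() put the bytes into network
 * (big-endian) order before the command fields are converted to the
 * little-endian layout expected by the firmware.
 */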
3244
3245 static int
3246 hns3_init_mgr_tbl(struct hns3_hw *hw)
3247 {
3248 #define HNS_MAC_MGR_TBL_MAX_SIZE        16
3249         struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3250         int table_item_num;
3251         int ret;
3252         int i;
3253
3254         memset(mgr_table, 0, sizeof(mgr_table));
3255         hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3256         for (i = 0; i < table_item_num; i++) {
3257                 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3258                 if (ret) {
3259                         PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3260                                      ret);
3261                         return ret;
3262                 }
3263         }
3264
3265         return 0;
3266 }
3267
3268 static void
3269 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3270                         bool en_mc, bool en_bc, int vport_id)
3271 {
3272         if (!param)
3273                 return;
3274
3275         memset(param, 0, sizeof(struct hns3_promisc_param));
3276         if (en_uc)
3277                 param->enable = HNS3_PROMISC_EN_UC;
3278         if (en_mc)
3279                 param->enable |= HNS3_PROMISC_EN_MC;
3280         if (en_bc)
3281                 param->enable |= HNS3_PROMISC_EN_BC;
3282         param->vf_id = vport_id;
3283 }
3284
3285 static int
3286 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3287 {
3288         struct hns3_promisc_cfg_cmd *req;
3289         struct hns3_cmd_desc desc;
3290         int ret;
3291
3292         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3293
3294         req = (struct hns3_promisc_cfg_cmd *)desc.data;
3295         req->vf_id = param->vf_id;
3296         req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3297             HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3298
3299         ret = hns3_cmd_send(hw, &desc, 1);
3300         if (ret)
3301                 PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
3302
3303         return ret;
3304 }
3305
3306 static int
3307 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3308 {
3309         struct hns3_promisc_param param;
3310         bool en_bc_pmc = true;
3311         uint8_t vf_id;
3312         int ret;
3313
3314         /*
3315          * In the current version, VFs are not supported when the PF is driven
3316          * by the DPDK driver. The vf_id related to the PF is 0, so only the
3317          * parameters for vf_id 0 need to be configured.
3318          */
3319         vf_id = 0;
3320
3321         hns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3322         ret = hns3_cmd_set_promisc_mode(hw, &param);
3323         if (ret)
3324                 return ret;
3325
3326         return 0;
3327 }
3328
3329 static int
3330 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
3331 {
3332         struct hns3_adapter *hns = dev->data->dev_private;
3333         struct hns3_hw *hw = &hns->hw;
3334         bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
3335         int ret = 0;
3336
3337         rte_spinlock_lock(&hw->lock);
3338         ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
3339         rte_spinlock_unlock(&hw->lock);
3340         if (ret)
3341                 hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
3342
3343         return ret;
3344 }
3345
3346 static int
3347 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
3348 {
3349         struct hns3_adapter *hns = dev->data->dev_private;
3350         struct hns3_hw *hw = &hns->hw;
3351         bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
3352         int ret = 0;
3353
3354         /* If the port is in all_multicast mode, it must remain so. */
3355         rte_spinlock_lock(&hw->lock);
3356         ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
3357         rte_spinlock_unlock(&hw->lock);
3358         if (ret)
3359                 hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
3360
3361         return ret;
3362 }
3363
3364 static int
3365 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
3366 {
3367         struct hns3_adapter *hns = dev->data->dev_private;
3368         struct hns3_hw *hw = &hns->hw;
3369         bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
3370         int ret = 0;
3371
3372         rte_spinlock_lock(&hw->lock);
3373         ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
3374         rte_spinlock_unlock(&hw->lock);
3375         if (ret)
3376                 hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
3377
3378         return ret;
3379 }
3380
3381 static int
3382 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
3383 {
3384         struct hns3_adapter *hns = dev->data->dev_private;
3385         struct hns3_hw *hw = &hns->hw;
3386         bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
3387         int ret = 0;
3388
3389         /* Stay in all_multicast mode while promiscuous mode is enabled. */
3390         if (dev->data->promiscuous == 1)
3391                 return 0;
3392
3393         rte_spinlock_lock(&hw->lock);
3394         ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
3395         rte_spinlock_unlock(&hw->lock);
3396         if (ret)
3397                 hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
3398
3399         return ret;
3400 }
3401
3402 static int
3403 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
3404 {
3405         struct hns3_sfp_speed_cmd *resp;
3406         struct hns3_cmd_desc desc;
3407         int ret;
3408
3409         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true);
3410         resp = (struct hns3_sfp_speed_cmd *)desc.data;
3411         ret = hns3_cmd_send(hw, &desc, 1);
3412         if (ret == -EOPNOTSUPP) {
3413                 hns3_err(hw, "IMP does not support getting SFP speed, ret = %d", ret);
3414                 return ret;
3415         } else if (ret) {
3416                 hns3_err(hw, "get sfp speed failed %d", ret);
3417                 return ret;
3418         }
3419
3420         *speed = resp->sfp_speed;
3421
3422         return 0;
3423 }
3424
3425 static uint8_t
3426 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
3427 {
3428         if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
3429                 duplex = ETH_LINK_FULL_DUPLEX;
3430
3431         return duplex;
3432 }
3433
3434 static int
3435 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
3436 {
3437         struct hns3_mac *mac = &hw->mac;
3438         int ret;
3439
3440         duplex = hns3_check_speed_dup(duplex, speed);
3441         if (mac->link_speed == speed && mac->link_duplex == duplex)
3442                 return 0;
3443
3444         ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
3445         if (ret)
3446                 return ret;
3447
3448         mac->link_speed = speed;
3449         mac->link_duplex = duplex;
3450
3451         return 0;
3452 }
3453
3454 static int
3455 hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
3456 {
3457         struct hns3_adapter *hns = eth_dev->data->dev_private;
3458         struct hns3_hw *hw = &hns->hw;
3459         struct hns3_pf *pf = &hns->pf;
3460         uint32_t speed;
3461         int ret;
3462
3463         /* If IMP does not support getting SFP/qSFP speed, return directly */
3464         if (!pf->support_sfp_query)
3465                 return 0;
3466
3467         ret = hns3_get_sfp_speed(hw, &speed);
3468         if (ret == -EOPNOTSUPP) {
3469                 pf->support_sfp_query = false;
3470                 return ret;
3471         } else if (ret)
3472                 return ret;
3473
3474         if (speed == ETH_SPEED_NUM_NONE)
3475                 return 0; /* do nothing if no SFP */
3476
3477         /* Config full duplex for SFP */
3478         return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
3479 }
3480
3481 static int
3482 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
3483 {
3484         struct hns3_config_mac_mode_cmd *req;
3485         struct hns3_cmd_desc desc;
3486         uint32_t loop_en = 0;
3487         uint8_t val = 0;
3488         int ret;
3489
3490         req = (struct hns3_config_mac_mode_cmd *)desc.data;
3491
3492         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
3493         if (enable)
3494                 val = 1;
3495         hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
3496         hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
3497         hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
3498         hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
3499         hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
3500         hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
3501         hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
3502         hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
3503         hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
3504         hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
3505         hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
3506         hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
3507         hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
3508         hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
3509         req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
3510
3511         ret = hns3_cmd_send(hw, &desc, 1);
3512         if (ret)
3513                 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
3514
3515         return ret;
3516 }
3517
3518 static int
3519 hns3_get_mac_link_status(struct hns3_hw *hw)
3520 {
3521         struct hns3_link_status_cmd *req;
3522         struct hns3_cmd_desc desc;
3523         int link_status;
3524         int ret;
3525
3526         hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
3527         ret = hns3_cmd_send(hw, &desc, 1);
3528         if (ret) {
3529                 hns3_err(hw, "get link status cmd failed %d", ret);
3530                 return ret;
3531         }
3532
3533         req = (struct hns3_link_status_cmd *)desc.data;
3534         link_status = req->status & HNS3_LINK_STATUS_UP_M;
3535
3536         return !!link_status;
3537 }
3538
3539 static void
3540 hns3_update_link_status(struct hns3_hw *hw)
3541 {
3542         int state;
3543
3544         state = hns3_get_mac_link_status(hw);
3545         if (state != hw->mac.link_status)
3546                 hw->mac.link_status = state;
3547 }
3548
3549 static void
3550 hns3_service_handler(void *param)
3551 {
3552         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
3553         struct hns3_adapter *hns = eth_dev->data->dev_private;
3554         struct hns3_hw *hw = &hns->hw;
3555
3556         hns3_update_speed_duplex(eth_dev);
3557         hns3_update_link_status(hw);
3558
3559         rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
3560 }
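/*
 * Note (editor's addition): hns3_service_handler() is a self-rearming
 * rte_eal_alarm callback. It is armed once in hns3_dev_init(), cancelled in
 * hns3_dev_close(), and re-arms itself every HNS3_SERVICE_INTERVAL to poll
 * the SFP speed and the MAC link status.
 */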
3561
3562 static int
3563 hns3_init_hardware(struct hns3_adapter *hns)
3564 {
3565         struct hns3_hw *hw = &hns->hw;
3566         int ret;
3567
3568         ret = hns3_map_tqp(hw);
3569         if (ret) {
3570                 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
3571                 return ret;
3572         }
3573
3574         ret = hns3_init_umv_space(hw);
3575         if (ret) {
3576                 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
3577                 return ret;
3578         }
3579
3580         ret = hns3_mac_init(hw);
3581         if (ret) {
3582                 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
3583                 goto err_mac_init;
3584         }
3585
3586         ret = hns3_init_mgr_tbl(hw);
3587         if (ret) {
3588                 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
3589                 goto err_mac_init;
3590         }
3591
3592         ret = hns3_set_promisc_mode(hw, false, false);
3593         if (ret) {
3594                 PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret);
3595                 goto err_mac_init;
3596         }
3597
3598         ret = hns3_init_vlan_config(hns);
3599         if (ret) {
3600                 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
3601                 goto err_mac_init;
3602         }
3603
3604         ret = hns3_dcb_init(hw);
3605         if (ret) {
3606                 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
3607                 goto err_mac_init;
3608         }
3609
3610         ret = hns3_init_fd_config(hns);
3611         if (ret) {
3612                 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
3613                 goto err_mac_init;
3614         }
3615
3616         ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
3617         if (ret) {
3618                 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
3619                 goto err_mac_init;
3620         }
3621
3622         ret = hns3_config_gro(hw, false);
3623         if (ret) {
3624                 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
3625                 goto err_mac_init;
3626         }
3627         return 0;
3628
3629 err_mac_init:
3630         hns3_uninit_umv_space(hw);
3631         return ret;
3632 }
3633
3634 static int
3635 hns3_init_pf(struct rte_eth_dev *eth_dev)
3636 {
3637         struct rte_device *dev = eth_dev->device;
3638         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
3639         struct hns3_adapter *hns = eth_dev->data->dev_private;
3640         struct hns3_hw *hw = &hns->hw;
3641         int ret;
3642
3643         PMD_INIT_FUNC_TRACE();
3644
3645         /* Get the hardware I/O base address from PCIe BAR2 I/O space */
3646         hw->io_base = pci_dev->mem_resource[2].addr;
3647
3648         /* Initialize the firmware command queue */
3649         ret = hns3_cmd_init_queue(hw);
3650         if (ret) {
3651                 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
3652                 goto err_cmd_init_queue;
3653         }
3654
3655         /* Initialize the firmware command */
3656         ret = hns3_cmd_init(hw);
3657         if (ret) {
3658                 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
3659                 goto err_cmd_init;
3660         }
3661
3662         /* Get configuration */
3663         ret = hns3_get_configuration(hw);
3664         if (ret) {
3665                 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
3666                 goto err_get_config;
3667         }
3668
3669         ret = hns3_init_hardware(hns);
3670         if (ret) {
3671                 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
3672                 goto err_get_config;
3673         }
3674
3675         /* Initialize flow director filter list & hash */
3676         ret = hns3_fdir_filter_init(hns);
3677         if (ret) {
3678                 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
3679                 goto err_hw_init;
3680         }
3681
3682         hns3_set_default_rss_args(hw);
3683
3684         return 0;
3685
3686 err_hw_init:
3687         hns3_uninit_umv_space(hw);
3688
3689 err_get_config:
3690         hns3_cmd_uninit(hw);
3691
3692 err_cmd_init:
3693         hns3_cmd_destroy_queue(hw);
3694
3695 err_cmd_init_queue:
3696         hw->io_base = NULL;
3697
3698         return ret;
3699 }
3700
3701 static void
3702 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
3703 {
3704         struct hns3_adapter *hns = eth_dev->data->dev_private;
3705         struct hns3_hw *hw = &hns->hw;
3706
3707         PMD_INIT_FUNC_TRACE();
3708
3709         hns3_rss_uninit(hns);
3710         hns3_fdir_filter_uninit(hns);
3711         hns3_uninit_umv_space(hw);
3712         hns3_cmd_uninit(hw);
3713         hns3_cmd_destroy_queue(hw);
3714         hw->io_base = NULL;
3715 }
3716
3717 static int
3718 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
3719 {
3720         struct hns3_hw *hw = &hns->hw;
3721         int ret;
3722
3723         ret = hns3_dcb_cfg_update(hns);
3724         if (ret)
3725                 return ret;
3726
3727         /* Enable queues */
3728         ret = hns3_start_queues(hns, reset_queue);
3729         if (ret) {
3730                 PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
3731                 return ret;
3732         }
3733
3734         /* Enable MAC */
3735         ret = hns3_cfg_mac_mode(hw, true);
3736         if (ret) {
3737                 PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
3738                 goto err_config_mac_mode;
3739         }
3740         return 0;
3741
3742 err_config_mac_mode:
3743         hns3_stop_queues(hns, true);
3744         return ret;
3745 }
3746
3747 static int
3748 hns3_dev_start(struct rte_eth_dev *eth_dev)
3749 {
3750         struct hns3_adapter *hns = eth_dev->data->dev_private;
3751         struct hns3_hw *hw = &hns->hw;
3752         int ret;
3753
3754         PMD_INIT_FUNC_TRACE();
3755
3756         rte_spinlock_lock(&hw->lock);
3757         hw->adapter_state = HNS3_NIC_STARTING;
3758
3759         ret = hns3_do_start(hns, true);
3760         if (ret) {
3761                 hw->adapter_state = HNS3_NIC_CONFIGURED;
3762                 rte_spinlock_unlock(&hw->lock);
3763                 return ret;
3764         }
3765
3766         hw->adapter_state = HNS3_NIC_STARTED;
3767         rte_spinlock_unlock(&hw->lock);
3768         hns3_set_rxtx_function(eth_dev);
3769
3770         hns3_info(hw, "hns3 dev start successful!");
3771         return 0;
3772 }
3773
3774 static int
3775 hns3_do_stop(struct hns3_adapter *hns)
3776 {
3777         struct hns3_hw *hw = &hns->hw;
3778         bool reset_queue;
3779         int ret;
3780
3781         ret = hns3_cfg_mac_mode(hw, false);
3782         if (ret)
3783                 return ret;
3784         hw->mac.link_status = ETH_LINK_DOWN;
3785
3786         hns3_configure_all_mac_addr(hns, true);
3787         reset_queue = true;
3788         hw->mac.default_addr_setted = false;
3789         return hns3_stop_queues(hns, reset_queue);
3790 }
3791
3792 static void
3793 hns3_dev_stop(struct rte_eth_dev *eth_dev)
3794 {
3795         struct hns3_adapter *hns = eth_dev->data->dev_private;
3796         struct hns3_hw *hw = &hns->hw;
3797
3798         PMD_INIT_FUNC_TRACE();
3799
3800         hw->adapter_state = HNS3_NIC_STOPPING;
3801         hns3_set_rxtx_function(eth_dev);
3802
3803         rte_spinlock_lock(&hw->lock);
3804
3805         hns3_do_stop(hns);
3806         hns3_dev_release_mbufs(hns);
3807         hw->adapter_state = HNS3_NIC_CONFIGURED;
3808         rte_spinlock_unlock(&hw->lock);
3809 }
3810
3811 static void
3812 hns3_dev_close(struct rte_eth_dev *eth_dev)
3813 {
3814         struct hns3_adapter *hns = eth_dev->data->dev_private;
3815         struct hns3_hw *hw = &hns->hw;
3816
3817         if (hw->adapter_state == HNS3_NIC_STARTED)
3818                 hns3_dev_stop(eth_dev);
3819
3820         hw->adapter_state = HNS3_NIC_CLOSING;
3821         rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
3822
3823         hns3_configure_all_mc_mac_addr(hns, true);
3824         hns3_remove_all_vlan_table(hns);
3825         hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
3826         hns3_uninit_pf(eth_dev);
3827         hns3_free_all_queues(eth_dev);
3828         rte_free(eth_dev->process_private);
3829         eth_dev->process_private = NULL;
3830         hw->adapter_state = HNS3_NIC_CLOSED;
3831         hns3_warn(hw, "Close port %d finished", hw->data->port_id);
3832 }
3833
3834 static int
3835 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3836 {
3837         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3838         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3839
3840         fc_conf->pause_time = pf->pause_time;
3841
3842         /* return current fc mode */
3843         switch (hw->current_mode) {
3844         case HNS3_FC_FULL:
3845                 fc_conf->mode = RTE_FC_FULL;
3846                 break;
3847         case HNS3_FC_TX_PAUSE:
3848                 fc_conf->mode = RTE_FC_TX_PAUSE;
3849                 break;
3850         case HNS3_FC_RX_PAUSE:
3851                 fc_conf->mode = RTE_FC_RX_PAUSE;
3852                 break;
3853         case HNS3_FC_NONE:
3854         default:
3855                 fc_conf->mode = RTE_FC_NONE;
3856                 break;
3857         }
3858
3859         return 0;
3860 }
3861
3862 static void
3863 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
3864 {
3865         switch (mode) {
3866         case RTE_FC_NONE:
3867                 hw->requested_mode = HNS3_FC_NONE;
3868                 break;
3869         case RTE_FC_RX_PAUSE:
3870                 hw->requested_mode = HNS3_FC_RX_PAUSE;
3871                 break;
3872         case RTE_FC_TX_PAUSE:
3873                 hw->requested_mode = HNS3_FC_TX_PAUSE;
3874                 break;
3875         case RTE_FC_FULL:
3876                 hw->requested_mode = HNS3_FC_FULL;
3877                 break;
3878         default:
3879                 hw->requested_mode = HNS3_FC_NONE;
3880                 hns3_warn(hw, "fc_mode(%u) is out of range and is set "
3881                           "to RTE_FC_NONE", mode);
3882                 break;
3883         }
3884 }
3885
3886 static int
3887 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3888 {
3889         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3890         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3891         int ret;
3892
3893         if (fc_conf->high_water || fc_conf->low_water ||
3894             fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
3895                 hns3_err(hw, "Unsupported flow control settings specified, "
3896                          "high_water(%u), low_water(%u), send_xon(%u) and "
3897                          "mac_ctrl_frame_fwd(%u) must be set to '0'",
3898                          fc_conf->high_water, fc_conf->low_water,
3899                          fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
3900                 return -EINVAL;
3901         }
3902         if (fc_conf->autoneg) {
3903                 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
3904                 return -EINVAL;
3905         }
3906         if (!fc_conf->pause_time) {
3907                 hns3_err(hw, "Invalid pause time %d setting.",
3908                          fc_conf->pause_time);
3909                 return -EINVAL;
3910         }
3911
3912         if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
3913             hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
3914                 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
3915                          "current_fc_status = %d", hw->current_fc_status);
3916                 return -EOPNOTSUPP;
3917         }
3918
3919         hns3_get_fc_mode(hw, fc_conf->mode);
3920         if (hw->requested_mode == hw->current_mode &&
3921             pf->pause_time == fc_conf->pause_time)
3922                 return 0;
3923
3924         rte_spinlock_lock(&hw->lock);
3925         ret = hns3_fc_enable(dev, fc_conf);
3926         rte_spinlock_unlock(&hw->lock);
3927
3928         return ret;
3929 }
3930
3931 static int
3932 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3933                             struct rte_eth_pfc_conf *pfc_conf)
3934 {
3935         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3936         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3937         uint8_t priority;
3938         int ret;
3939
3940         if (!hns3_dev_dcb_supported(hw)) {
3941                 hns3_err(hw, "This port does not support dcb configurations.");
3942                 return -EOPNOTSUPP;
3943         }
3944
3945         if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
3946             pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
3947                 hns3_err(hw, "Unsupported flow control settings specified, "
3948                          "high_water(%u), low_water(%u), send_xon(%u) and "
3949                          "mac_ctrl_frame_fwd(%u) must be set to '0'",
3950                          pfc_conf->fc.high_water, pfc_conf->fc.low_water,
3951                          pfc_conf->fc.send_xon,
3952                          pfc_conf->fc.mac_ctrl_frame_fwd);
3953                 return -EINVAL;
3954         }
3955         if (pfc_conf->fc.autoneg) {
3956                 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
3957                 return -EINVAL;
3958         }
3959         if (pfc_conf->fc.pause_time == 0) {
3960                 hns3_err(hw, "Invalid pause time %d setting.",
3961                          pfc_conf->fc.pause_time);
3962                 return -EINVAL;
3963         }
3964
3965         if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
3966             hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
3967                 hns3_err(hw, "MAC pause is enabled. Cannot set PFC. "
3968                              "current_fc_status = %d", hw->current_fc_status);
3969                 return -EOPNOTSUPP;
3970         }
3971
3972         priority = pfc_conf->priority;
3973         hns3_get_fc_mode(hw, pfc_conf->fc.mode);
3974         if (hw->dcb_info.pfc_en & BIT(priority) &&
3975             hw->requested_mode == hw->current_mode &&
3976             pfc_conf->fc.pause_time == pf->pause_time)
3977                 return 0;
3978
3979         rte_spinlock_lock(&hw->lock);
3980         ret = hns3_dcb_pfc_enable(dev, pfc_conf);
3981         rte_spinlock_unlock(&hw->lock);
3982
3983         return ret;
3984 }
3985
3986 static int
3987 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
3988 {
3989         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3990         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3991         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3992         int i;
3993
3994         rte_spinlock_lock(&hw->lock);
3995         if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
3996                 dcb_info->nb_tcs = pf->local_max_tc;
3997         else
3998                 dcb_info->nb_tcs = 1;
3999
4000         for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
4001                 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
4002         for (i = 0; i < dcb_info->nb_tcs; i++)
4003                 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
4004
4005         for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
4006                 dcb_info->tc_queue.tc_rxq[0][i].base =
4007                                         hw->tc_queue[i].tqp_offset;
4008                 dcb_info->tc_queue.tc_txq[0][i].base =
4009                                         hw->tc_queue[i].tqp_offset;
4010                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
4011                                         hw->tc_queue[i].tqp_count;
4012                 dcb_info->tc_queue.tc_txq[0][i].nb_queue =
4013                                         hw->tc_queue[i].tqp_count;
4014         }
4015         rte_spinlock_unlock(&hw->lock);
4016
4017         return 0;
4018 }
4019
4020 static const struct eth_dev_ops hns3_eth_dev_ops = {
4021         .dev_start          = hns3_dev_start,
4022         .dev_stop           = hns3_dev_stop,
4023         .dev_close          = hns3_dev_close,
4024         .promiscuous_enable = hns3_dev_promiscuous_enable,
4025         .promiscuous_disable = hns3_dev_promiscuous_disable,
4026         .allmulticast_enable  = hns3_dev_allmulticast_enable,
4027         .allmulticast_disable = hns3_dev_allmulticast_disable,
4028         .mtu_set            = hns3_dev_mtu_set,
4029         .dev_infos_get          = hns3_dev_infos_get,
4030         .fw_version_get         = hns3_fw_version_get,
4031         .rx_queue_setup         = hns3_rx_queue_setup,
4032         .tx_queue_setup         = hns3_tx_queue_setup,
4033         .rx_queue_release       = hns3_dev_rx_queue_release,
4034         .tx_queue_release       = hns3_dev_tx_queue_release,
4035         .dev_configure          = hns3_dev_configure,
4036         .flow_ctrl_get          = hns3_flow_ctrl_get,
4037         .flow_ctrl_set          = hns3_flow_ctrl_set,
4038         .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
4039         .mac_addr_add           = hns3_add_mac_addr,
4040         .mac_addr_remove        = hns3_remove_mac_addr,
4041         .mac_addr_set           = hns3_set_default_mac_addr,
4042         .set_mc_addr_list       = hns3_set_mc_mac_addr_list,
4043         .link_update            = hns3_dev_link_update,
4044         .rss_hash_update        = hns3_dev_rss_hash_update,
4045         .rss_hash_conf_get      = hns3_dev_rss_hash_conf_get,
4046         .reta_update            = hns3_dev_rss_reta_update,
4047         .reta_query             = hns3_dev_rss_reta_query,
4048         .filter_ctrl            = hns3_dev_filter_ctrl,
4049         .vlan_filter_set        = hns3_vlan_filter_set,
4050         .vlan_tpid_set          = hns3_vlan_tpid_set,
4051         .vlan_offload_set       = hns3_vlan_offload_set,
4052         .vlan_pvid_set          = hns3_vlan_pvid_set,
4053         .get_dcb_info           = hns3_get_dcb_info,
4054         .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
4055 };
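/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * callbacks above are reached through the generic rte_ethdev API, e.g.:
 *
 *   struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS } };
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);   calls hns3_dev_configure()
 *   rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id, NULL, mb_pool);
 *                                                            calls hns3_rx_queue_setup()
 *   rte_eth_dev_start(port_id);                              calls hns3_dev_start()
 *   rte_eth_promiscuous_enable(port_id);                     calls hns3_dev_promiscuous_enable()
 *
 * port_id, nb_rxq, nb_txq, socket_id and mb_pool are placeholders supplied
 * by the application.
 */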
4056
4057 static int
4058 hns3_dev_init(struct rte_eth_dev *eth_dev)
4059 {
4060         struct rte_device *dev = eth_dev->device;
4061         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4062         struct hns3_adapter *hns = eth_dev->data->dev_private;
4063         struct hns3_hw *hw = &hns->hw;
4064         uint16_t device_id = pci_dev->id.device_id;
4065         int ret;
4066
4067         PMD_INIT_FUNC_TRACE();
4068         eth_dev->process_private = (struct hns3_process_private *)
4069             rte_zmalloc_socket("hns3_filter_list",
4070                                sizeof(struct hns3_process_private),
4071                                RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
4072         if (eth_dev->process_private == NULL) {
4073                 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
4074                 return -ENOMEM;
4075         }
4076         /* initialize flow filter lists */
4077         hns3_filterlist_init(eth_dev);
4078
4079         hns3_set_rxtx_function(eth_dev);
4080         eth_dev->dev_ops = &hns3_eth_dev_ops;
4081         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4082                 return 0;
4083
4084         hw->adapter_state = HNS3_NIC_UNINITIALIZED;
4085
4086         if (device_id == HNS3_DEV_ID_25GE_RDMA ||
4087             device_id == HNS3_DEV_ID_50GE_RDMA ||
4088             device_id == HNS3_DEV_ID_100G_RDMA_MACSEC)
4089                 hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1);
4090
4091         hns->is_vf = false;
4092         hw->data = eth_dev->data;
4093
4094         /*
4095          * Set the default max packet size according to the default MTU
4096          * value in the DPDK framework.
4097          */
4098         hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
4099
4100         ret = hns3_init_pf(eth_dev);
4101         if (ret) {
4102                 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
4103                 goto err_init_pf;
4104         }
4105
4106         /* Allocate memory for storing MAC addresses */
4107         eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
4108                                                sizeof(struct rte_ether_addr) *
4109                                                HNS3_UC_MACADDR_NUM, 0);
4110         if (eth_dev->data->mac_addrs == NULL) {
4111                 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
4112                              "to store MAC addresses",
4113                              sizeof(struct rte_ether_addr) *
4114                              HNS3_UC_MACADDR_NUM);
4115                 ret = -ENOMEM;
4116                 goto err_rte_zmalloc;
4117         }
4118
4119         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
4120                             &eth_dev->data->mac_addrs[0]);
4121
4122         hw->adapter_state = HNS3_NIC_INITIALIZED;
4123         /*
4124          * Pass the information to the rte_eth_dev_close() that it should also
4125          * release the private port resources.
4126          */
4127         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
4128
4129         rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4130         hns3_info(hw, "hns3 dev initialization successful!");
4131         return 0;
4132
4133 err_rte_zmalloc:
4134         hns3_uninit_pf(eth_dev);
4135
4136 err_init_pf:
4137         eth_dev->dev_ops = NULL;
4138         eth_dev->rx_pkt_burst = NULL;
4139         eth_dev->tx_pkt_burst = NULL;
4140         eth_dev->tx_pkt_prepare = NULL;
4141         rte_free(eth_dev->process_private);
4142         eth_dev->process_private = NULL;
4143         return ret;
4144 }
4145
4146 static int
4147 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
4148 {
4149         struct hns3_adapter *hns = eth_dev->data->dev_private;
4150         struct hns3_hw *hw = &hns->hw;
4151
4152         PMD_INIT_FUNC_TRACE();
4153
4154         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4155                 return -EPERM;
4156
4157         eth_dev->dev_ops = NULL;
4158         eth_dev->rx_pkt_burst = NULL;
4159         eth_dev->tx_pkt_burst = NULL;
4160         eth_dev->tx_pkt_prepare = NULL;
4161         if (hw->adapter_state < HNS3_NIC_CLOSING)
4162                 hns3_dev_close(eth_dev);
4163
4164         hw->adapter_state = HNS3_NIC_REMOVED;
4165         return 0;
4166 }
4167
4168 static int
4169 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4170                    struct rte_pci_device *pci_dev)
4171 {
4172         return rte_eth_dev_pci_generic_probe(pci_dev,
4173                                              sizeof(struct hns3_adapter),
4174                                              hns3_dev_init);
4175 }
4176
4177 static int
4178 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
4179 {
4180         return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
4181 }
4182
4183 static const struct rte_pci_id pci_id_hns3_map[] = {
4184         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
4185         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
4186         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
4187         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
4188         { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
4189         { .vendor_id = 0, /* sentinel */ },
4190 };
4191
4192 static struct rte_pci_driver rte_hns3_pmd = {
4193         .id_table = pci_id_hns3_map,
4194         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
4195         .probe = eth_hns3_pci_probe,
4196         .remove = eth_hns3_pci_remove,
4197 };
4198
4199 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
4200 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
4201 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
4202
4203 RTE_INIT(hns3_init_log)
4204 {
4205         hns3_logtype_init = rte_log_register("pmd.net.hns3.init");
4206         if (hns3_logtype_init >= 0)
4207                 rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE);
4208         hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver");
4209         if (hns3_logtype_driver >= 0)
4210                 rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE);
4211 }