1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
6 #include <rte_bus_pci.h>
7 #include <ethdev_pci.h>
9 #include "hns3_ethdev.h"
10 #include "hns3_common.h"
11 #include "hns3_logs.h"
12 #include "hns3_rxtx.h"
13 #include "hns3_intr.h"
14 #include "hns3_regs.h"
17 #include "hns3_flow.h"
19 #define HNS3_SERVICE_INTERVAL 1000000 /* us */
20 #define HNS3_SERVICE_QUICK_INTERVAL 10
21 #define HNS3_INVALID_PVID 0xFFFF
23 #define HNS3_FILTER_TYPE_VF 0
24 #define HNS3_FILTER_TYPE_PORT 1
25 #define HNS3_FILTER_FE_EGRESS_V1_B BIT(0)
26 #define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0)
27 #define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1)
28 #define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2)
29 #define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3)
30 #define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \
31 | HNS3_FILTER_FE_ROCE_EGRESS_B)
32 #define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
33 | HNS3_FILTER_FE_ROCE_INGRESS_B)
35 /* Reset related Registers */
36 #define HNS3_GLOBAL_RESET_BIT 0
37 #define HNS3_CORE_RESET_BIT 1
38 #define HNS3_IMP_RESET_BIT 2
39 #define HNS3_FUN_RST_ING_B 0
41 #define HNS3_VECTOR0_IMP_RESET_INT_B 1
42 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U
43 #define HNS3_VECTOR0_IMP_RD_POISON_B 5U
44 #define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U
46 #define HNS3_RESET_WAIT_MS 100
47 #define HNS3_RESET_WAIT_CNT 200
49 /* FEC mode order defined in HNS3 hardware */
50 #define HNS3_HW_FEC_MODE_NOFEC 0
51 #define HNS3_HW_FEC_MODE_BASER 1
52 #define HNS3_HW_FEC_MODE_RS 2
55 HNS3_VECTOR0_EVENT_RST,
56 HNS3_VECTOR0_EVENT_MBX,
57 HNS3_VECTOR0_EVENT_ERR,
58 HNS3_VECTOR0_EVENT_PTP,
59 HNS3_VECTOR0_EVENT_OTHER,
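/*
 * Per-speed FEC capability table: NOFEC and AUTO are advertised at every
 * speed, BASER at 10G-50G, and RS at 25G, 50G, 100G and 200G.
 */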
62 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
63 { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
64 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
65 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
67 { RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
68 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
69 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
70 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
72 { RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
73 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
74 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
76 { RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
77 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
78 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
79 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
81 { RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
82 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
83 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
85 { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
86 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
87 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
90 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
92 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
93 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
95 static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
96 static bool hns3_update_link_status(struct hns3_hw *hw);
98 static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
99 struct rte_ether_addr *mac_addr);
100 static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
101 struct rte_ether_addr *mac_addr);
102 static int hns3_restore_fec(struct hns3_hw *hw);
103 static int hns3_query_dev_fec_info(struct hns3_hw *hw);
104 static int hns3_do_stop(struct hns3_adapter *hns);
105 static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
106 static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
110 hns3_pf_disable_irq0(struct hns3_hw *hw)
112 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
116 hns3_pf_enable_irq0(struct hns3_hw *hw)
118 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
121 static enum hns3_evt_cause
122 hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
125 struct hns3_hw *hw = &hns->hw;
127 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
128 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
129 *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
131 hw->reset.stats.imp_cnt++;
132 hns3_warn(hw, "IMP reset detected, clear reset status");
134 hns3_schedule_delayed_reset(hns);
135 hns3_warn(hw, "IMP reset detected, don't clear reset status");
138 return HNS3_VECTOR0_EVENT_RST;
141 static enum hns3_evt_cause
142 hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
145 struct hns3_hw *hw = &hns->hw;
147 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
148 hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
149 *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
151 hw->reset.stats.global_cnt++;
152 hns3_warn(hw, "Global reset detected, clear reset status");
154 hns3_schedule_delayed_reset(hns);
156 "Global reset detected, don't clear reset status");
159 return HNS3_VECTOR0_EVENT_RST;
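/*
 * Decode the vector0 interrupt into a single event cause. The sources are
 * checked in priority order: IMP reset, global reset, 1588 (PTP) event,
 * MSI-X/RAS errors, and finally the mailbox (CMDQ RX) event. The register
 * bits that must be written back to clear the event are returned through
 * "clearval" when it is not NULL.
 */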
162 static enum hns3_evt_cause
163 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
165 struct hns3_hw *hw = &hns->hw;
166 uint32_t vector0_int_stats;
167 uint32_t cmdq_src_val;
168 uint32_t hw_err_src_reg;
170 enum hns3_evt_cause ret;
173 /* fetch the events from their corresponding regs */
174 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
175 cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
176 hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	is_delay = clearval == NULL;
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox event handling
	 * is deferred. Since the RX CMDQ event is not cleared this time,
	 * hardware will raise another interrupt just for the mailbox.
186 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
187 ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
192 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
193 ret = hns3_proc_global_reset_event(hns, is_delay, &val);
197 /* Check for vector0 1588 event source */
198 if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
199 val = BIT(HNS3_VECTOR0_1588_INT_B);
200 ret = HNS3_VECTOR0_EVENT_PTP;
204 /* check for vector0 msix event source */
205 if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
206 hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
207 val = vector0_int_stats | hw_err_src_reg;
208 ret = HNS3_VECTOR0_EVENT_ERR;
212 /* check for vector0 mailbox(=CMDQ RX) event source */
213 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
214 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
216 ret = HNS3_VECTOR0_EVENT_MBX;
220 val = vector0_int_stats;
221 ret = HNS3_VECTOR0_EVENT_OTHER;
230 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
232 if (event_type == HNS3_VECTOR0_EVENT_RST ||
233 event_type == HNS3_VECTOR0_EVENT_PTP)
234 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
235 else if (event_type == HNS3_VECTOR0_EVENT_MBX)
236 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
240 hns3_clear_all_event_cause(struct hns3_hw *hw)
242 uint32_t vector0_int_stats;
244 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
245 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
246 hns3_warn(hw, "Probe during IMP reset interrupt");
248 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
249 hns3_warn(hw, "Probe during Global reset interrupt");
251 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
252 BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
253 BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
254 BIT(HNS3_VECTOR0_CORERESET_INT_B));
255 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
256 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
257 BIT(HNS3_VECTOR0_1588_INT_B));
261 hns3_handle_mac_tnl(struct hns3_hw *hw)
263 struct hns3_cmd_desc desc;
267 /* query and clear mac tnl interrupt */
268 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
269 ret = hns3_cmd_send(hw, &desc, 1);
271 hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
275 status = rte_le_to_cpu_32(desc.data[0]);
277 hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
278 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
280 desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
281 ret = hns3_cmd_send(hw, &desc, 1);
283 hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
289 hns3_interrupt_handler(void *param)
291 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
292 struct hns3_adapter *hns = dev->data->dev_private;
293 struct hns3_hw *hw = &hns->hw;
294 enum hns3_evt_cause event_cause;
295 uint32_t clearval = 0;
296 uint32_t vector0_int;
300 /* Disable interrupt */
301 hns3_pf_disable_irq0(hw);
303 event_cause = hns3_check_event_cause(hns, &clearval);
304 vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
305 ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
306 cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
307 hns3_clear_event_cause(hw, event_cause, clearval);
308 /* vector 0 interrupt is shared with reset and mailbox source events. */
309 if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
310 hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
311 "ras_int_stat:0x%x cmdq_int_stat:0x%x",
312 vector0_int, ras_int, cmdq_int);
313 hns3_handle_mac_tnl(hw);
314 hns3_handle_error(hns);
315 } else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
316 hns3_warn(hw, "received reset interrupt");
317 hns3_schedule_reset(hns);
318 } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
319 hns3_dev_handle_mbx_msg(hw);
321 hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
322 "ras_int_stat:0x%x cmdq_int_stat:0x%x",
323 vector0_int, ras_int, cmdq_int);
	/* Enable interrupt if it is not caused by reset */
327 hns3_pf_enable_irq0(hw);
331 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
333 #define HNS3_VLAN_ID_OFFSET_STEP 160
334 #define HNS3_VLAN_BYTE_SIZE 8
335 struct hns3_vlan_filter_pf_cfg_cmd *req;
336 struct hns3_hw *hw = &hns->hw;
337 uint8_t vlan_offset_byte_val;
338 struct hns3_cmd_desc desc;
339 uint8_t vlan_offset_byte;
340 uint8_t vlan_offset_base;
343 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
345 vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
346 vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
348 vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
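	/*
	 * Each HNS3_VLAN_ID_OFFSET_STEP (160) vlan ids share one command
	 * block, and each byte of the bitmap covers 8 vlan ids. For example,
	 * vlan_id 1000: vlan_offset_base = 1000 / 160 = 6, vlan_offset_byte =
	 * (1000 % 160) / 8 = 5, and the byte value is 1 << (1000 % 8) = 0x01.
	 */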
350 req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
351 req->vlan_offset = vlan_offset_base;
352 req->vlan_cfg = on ? 0 : 1;
353 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
355 ret = hns3_cmd_send(hw, &desc, 1);
357 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
364 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
366 struct hns3_user_vlan_table *vlan_entry;
367 struct hns3_pf *pf = &hns->pf;
369 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
370 if (vlan_entry->vlan_id == vlan_id) {
371 if (vlan_entry->hd_tbl_status)
372 hns3_set_port_vlan_filter(hns, vlan_id, 0);
373 LIST_REMOVE(vlan_entry, next);
374 rte_free(vlan_entry);
381 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
384 struct hns3_user_vlan_table *vlan_entry;
385 struct hns3_hw *hw = &hns->hw;
386 struct hns3_pf *pf = &hns->pf;
388 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
389 if (vlan_entry->vlan_id == vlan_id)
393 vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
394 if (vlan_entry == NULL) {
395 hns3_err(hw, "Failed to malloc hns3 vlan table");
399 vlan_entry->hd_tbl_status = writen_to_tbl;
400 vlan_entry->vlan_id = vlan_id;
402 LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
406 hns3_restore_vlan_table(struct hns3_adapter *hns)
408 struct hns3_user_vlan_table *vlan_entry;
409 struct hns3_hw *hw = &hns->hw;
410 struct hns3_pf *pf = &hns->pf;
414 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
415 return hns3_vlan_pvid_configure(hns,
416 hw->port_base_vlan_cfg.pvid, 1);
418 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
419 if (vlan_entry->hd_tbl_status) {
420 vlan_id = vlan_entry->vlan_id;
421 ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
431 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
433 struct hns3_hw *hw = &hns->hw;
	bool written_to_tbl = false;
438 * When vlan filter is enabled, hardware regards packets without vlan
439 * as packets with vlan 0. So, to receive packets without vlan, vlan id
440 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
442 if (on == 0 && vlan_id == 0)
	 * When port-based vlan is enabled, it is used as the vlan filter
	 * condition. In this case, we don't update the vlan filter table
	 * when the user adds or removes a vlan; we only update the vlan
	 * list. The vlan ids in the list are written to the vlan filter
	 * table only after port-based vlan is disabled.
452 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
453 ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		written_to_tbl = true;
		hns3_add_dev_vlan_table(hns, vlan_id, written_to_tbl);
461 hns3_rm_dev_vlan_table(hns, vlan_id);
467 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
469 struct hns3_adapter *hns = dev->data->dev_private;
470 struct hns3_hw *hw = &hns->hw;
473 rte_spinlock_lock(&hw->lock);
474 ret = hns3_vlan_filter_configure(hns, vlan_id, on);
475 rte_spinlock_unlock(&hw->lock);
480 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
483 struct hns3_rx_vlan_type_cfg_cmd *rx_req;
484 struct hns3_tx_vlan_type_cfg_cmd *tx_req;
485 struct hns3_hw *hw = &hns->hw;
486 struct hns3_cmd_desc desc;
489 if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
490 vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
491 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
495 if (tpid != RTE_ETHER_TYPE_VLAN) {
496 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
500 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
501 rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
503 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
504 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
505 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
506 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
507 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
508 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
509 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
510 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
513 ret = hns3_cmd_send(hw, &desc, 1);
515 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
520 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
522 tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
523 tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
524 tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
526 ret = hns3_cmd_send(hw, &desc, 1);
528 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
534 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
537 struct hns3_adapter *hns = dev->data->dev_private;
538 struct hns3_hw *hw = &hns->hw;
541 rte_spinlock_lock(&hw->lock);
542 ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
543 rte_spinlock_unlock(&hw->lock);
548 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
549 struct hns3_rx_vtag_cfg *vcfg)
551 struct hns3_vport_vtag_rx_cfg_cmd *req;
552 struct hns3_hw *hw = &hns->hw;
553 struct hns3_cmd_desc desc;
558 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
560 req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
561 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
562 vcfg->strip_tag1_en ? 1 : 0);
563 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
564 vcfg->strip_tag2_en ? 1 : 0);
565 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
566 vcfg->vlan1_vlan_prionly ? 1 : 0);
567 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
568 vcfg->vlan2_vlan_prionly ? 1 : 0);
570 /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
571 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
572 vcfg->strip_tag1_discard_en ? 1 : 0);
573 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
574 vcfg->strip_tag2_discard_en ? 1 : 0);
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so we only need to configure the PF vport.
579 vport_id = HNS3_PF_FUNC_ID;
580 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
581 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
582 req->vf_bitmap[req->vf_offset] = bitmap;
584 ret = hns3_cmd_send(hw, &desc, 1);
586 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
591 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
593 struct hns3_rx_vtag_cfg rxvlan_cfg;
594 struct hns3_hw *hw = &hns->hw;
597 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
598 rxvlan_cfg.strip_tag1_en = false;
599 rxvlan_cfg.strip_tag2_en = enable;
600 rxvlan_cfg.strip_tag2_discard_en = false;
602 rxvlan_cfg.strip_tag1_en = enable;
603 rxvlan_cfg.strip_tag2_en = true;
604 rxvlan_cfg.strip_tag2_discard_en = true;
607 rxvlan_cfg.strip_tag1_discard_en = false;
608 rxvlan_cfg.vlan1_vlan_prionly = false;
609 rxvlan_cfg.vlan2_vlan_prionly = false;
610 rxvlan_cfg.rx_vlan_offload_en = enable;
612 ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
614 hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
615 enable ? "enable" : "disable", ret);
619 memcpy(&hns->pf.vtag_config.rx_vcfg, &rxvlan_cfg,
620 sizeof(struct hns3_rx_vtag_cfg));
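/*
 * Enable or disable a vlan filter engine. "vlan_type" selects the VF
 * (HNS3_FILTER_TYPE_VF) or port (HNS3_FILTER_TYPE_PORT) filter, and
 * "fe_type" is a bitmask of the ingress/egress filter engines defined by
 * the HNS3_FILTER_FE_* macros above.
 */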
626 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
627 uint8_t fe_type, bool filter_en, uint8_t vf_id)
629 struct hns3_vlan_filter_ctrl_cmd *req;
630 struct hns3_cmd_desc desc;
633 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
635 req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
636 req->vlan_type = vlan_type;
637 req->vlan_fe = filter_en ? fe_type : 0;
640 ret = hns3_cmd_send(hw, &desc, 1);
642 hns3_err(hw, "set vlan filter fail, ret =%d", ret);
648 hns3_vlan_filter_init(struct hns3_adapter *hns)
650 struct hns3_hw *hw = &hns->hw;
653 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
654 HNS3_FILTER_FE_EGRESS, false,
657 hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
661 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
662 HNS3_FILTER_FE_INGRESS, false,
665 hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);
671 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
673 struct hns3_hw *hw = &hns->hw;
676 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
677 HNS3_FILTER_FE_INGRESS, enable,
680 hns3_err(hw, "failed to %s port vlan filter, ret = %d",
681 enable ? "enable" : "disable", ret);
687 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
689 struct hns3_adapter *hns = dev->data->dev_private;
690 struct hns3_hw *hw = &hns->hw;
691 struct rte_eth_rxmode *rxmode;
692 unsigned int tmp_mask;
696 rte_spinlock_lock(&hw->lock);
697 rxmode = &dev->data->dev_conf.rxmode;
698 tmp_mask = (unsigned int)mask;
699 if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
700 /* ignore vlan filter configuration during promiscuous mode */
701 if (!dev->data->promiscuous) {
702 /* Enable or disable VLAN filter */
703 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
706 ret = hns3_enable_vlan_filter(hns, enable);
708 rte_spinlock_unlock(&hw->lock);
709 hns3_err(hw, "failed to %s rx filter, ret = %d",
710 enable ? "enable" : "disable", ret);
716 if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
717 /* Enable or disable VLAN stripping */
718 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
721 ret = hns3_en_hw_strip_rxvtag(hns, enable);
723 rte_spinlock_unlock(&hw->lock);
724 hns3_err(hw, "failed to %s rx strip, ret = %d",
725 enable ? "enable" : "disable", ret);
730 rte_spinlock_unlock(&hw->lock);
736 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
737 struct hns3_tx_vtag_cfg *vcfg)
739 struct hns3_vport_vtag_tx_cfg_cmd *req;
740 struct hns3_cmd_desc desc;
741 struct hns3_hw *hw = &hns->hw;
746 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
748 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
749 req->def_vlan_tag1 = vcfg->default_tag1;
750 req->def_vlan_tag2 = vcfg->default_tag2;
751 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
752 vcfg->accept_tag1 ? 1 : 0);
753 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
754 vcfg->accept_untag1 ? 1 : 0);
755 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
756 vcfg->accept_tag2 ? 1 : 0);
757 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
758 vcfg->accept_untag2 ? 1 : 0);
759 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
760 vcfg->insert_tag1_en ? 1 : 0);
761 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
762 vcfg->insert_tag2_en ? 1 : 0);
763 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
765 /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
766 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
767 vcfg->tag_shift_mode_en ? 1 : 0);
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so we only need to configure the PF vport.
773 vport_id = HNS3_PF_FUNC_ID;
774 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
775 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
776 req->vf_bitmap[req->vf_offset] = bitmap;
778 ret = hns3_cmd_send(hw, &desc, 1);
780 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
786 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
789 struct hns3_hw *hw = &hns->hw;
790 struct hns3_tx_vtag_cfg txvlan_cfg;
793 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
794 txvlan_cfg.accept_tag1 = true;
795 txvlan_cfg.insert_tag1_en = false;
796 txvlan_cfg.default_tag1 = 0;
798 txvlan_cfg.accept_tag1 =
799 hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
800 txvlan_cfg.insert_tag1_en = true;
801 txvlan_cfg.default_tag1 = pvid;
804 txvlan_cfg.accept_untag1 = true;
805 txvlan_cfg.accept_tag2 = true;
806 txvlan_cfg.accept_untag2 = true;
807 txvlan_cfg.insert_tag2_en = false;
808 txvlan_cfg.default_tag2 = 0;
809 txvlan_cfg.tag_shift_mode_en = true;
811 ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
813 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
818 memcpy(&hns->pf.vtag_config.tx_vcfg, &txvlan_cfg,
819 sizeof(struct hns3_tx_vtag_cfg));
826 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
828 struct hns3_user_vlan_table *vlan_entry;
829 struct hns3_pf *pf = &hns->pf;
831 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
832 if (vlan_entry->hd_tbl_status) {
833 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
834 vlan_entry->hd_tbl_status = false;
839 vlan_entry = LIST_FIRST(&pf->vlan_list);
841 LIST_REMOVE(vlan_entry, next);
842 rte_free(vlan_entry);
843 vlan_entry = LIST_FIRST(&pf->vlan_list);
849 hns3_add_all_vlan_table(struct hns3_adapter *hns)
851 struct hns3_user_vlan_table *vlan_entry;
852 struct hns3_pf *pf = &hns->pf;
854 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
855 if (!vlan_entry->hd_tbl_status) {
856 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
857 vlan_entry->hd_tbl_status = true;
863 hns3_remove_all_vlan_table(struct hns3_adapter *hns)
865 struct hns3_hw *hw = &hns->hw;
868 hns3_rm_all_vlan_table(hns, true);
869 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
870 ret = hns3_set_port_vlan_filter(hns,
871 hw->port_base_vlan_cfg.pvid, 0);
873 hns3_err(hw, "Failed to remove all vlan table, ret =%d",
881 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
882 uint16_t port_base_vlan_state, uint16_t new_pvid)
884 struct hns3_hw *hw = &hns->hw;
888 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
889 old_pvid = hw->port_base_vlan_cfg.pvid;
890 if (old_pvid != HNS3_INVALID_PVID) {
891 ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
893 hns3_err(hw, "failed to remove old pvid %u, "
894 "ret = %d", old_pvid, ret);
899 hns3_rm_all_vlan_table(hns, false);
900 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
902 hns3_err(hw, "failed to add new pvid %u, ret = %d",
907 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
909 hns3_err(hw, "failed to remove pvid %u, ret = %d",
914 hns3_add_all_vlan_table(hns);
920 hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
922 struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
923 struct hns3_rx_vtag_cfg rx_vlan_cfg;
927 rx_strip_en = old_cfg->rx_vlan_offload_en;
929 rx_vlan_cfg.strip_tag1_en = rx_strip_en;
930 rx_vlan_cfg.strip_tag2_en = true;
931 rx_vlan_cfg.strip_tag2_discard_en = true;
933 rx_vlan_cfg.strip_tag1_en = false;
934 rx_vlan_cfg.strip_tag2_en = rx_strip_en;
935 rx_vlan_cfg.strip_tag2_discard_en = false;
937 rx_vlan_cfg.strip_tag1_discard_en = false;
938 rx_vlan_cfg.vlan1_vlan_prionly = false;
939 rx_vlan_cfg.vlan2_vlan_prionly = false;
940 rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;
942 ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
946 memcpy(&hns->pf.vtag_config.rx_vcfg, &rx_vlan_cfg,
947 sizeof(struct hns3_rx_vtag_cfg));
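/*
 * Apply a port-based vlan (PVID) configuration in three steps: configure
 * Tx vlan insertion, configure Rx vlan stripping for the PVID, and update
 * the vlan filter entries. If a later step fails, the earlier steps are
 * rolled back to the previous port_base_vlan_cfg state.
 */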
953 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
955 struct hns3_hw *hw = &hns->hw;
956 uint16_t port_base_vlan_state;
959 if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
960 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
			hns3_warn(hw, "Invalid operation! Current pvid is "
				  "%u, cannot disable pvid %u",
				  hw->port_base_vlan_cfg.pvid, pvid);
967 port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
968 HNS3_PORT_BASE_VLAN_DISABLE;
969 ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
971 hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
976 ret = hns3_en_pvid_strip(hns, on);
978 hns3_err(hw, "failed to config rx vlan strip for pvid, "
980 goto pvid_vlan_strip_fail;
983 if (pvid == HNS3_INVALID_PVID)
985 ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
987 hns3_err(hw, "failed to update vlan filter entries, ret = %d",
989 goto vlan_filter_set_fail;
993 hw->port_base_vlan_cfg.state = port_base_vlan_state;
994 hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
997 vlan_filter_set_fail:
998 err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
999 HNS3_PORT_BASE_VLAN_ENABLE);
1001 hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);
1003 pvid_vlan_strip_fail:
1004 err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
1005 hw->port_base_vlan_cfg.pvid);
1007 hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);
1013 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1015 struct hns3_adapter *hns = dev->data->dev_private;
1016 struct hns3_hw *hw = &hns->hw;
1017 bool pvid_en_state_change;
1018 uint16_t pvid_state;
1021 if (pvid > RTE_ETHER_MAX_VLAN_ID) {
1022 hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
1023 RTE_ETHER_MAX_VLAN_ID);
	 * If the PVID configuration state changes, the PVID state in
	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed.
1031 pvid_state = hw->port_base_vlan_cfg.state;
1032 if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
1033 (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
1034 pvid_en_state_change = false;
1036 pvid_en_state_change = true;
1038 rte_spinlock_lock(&hw->lock);
1039 ret = hns3_vlan_pvid_configure(hns, pvid, on);
1040 rte_spinlock_unlock(&hw->lock);
	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID-related
	 * operations in Tx/Rx need to be processed by the PMD.
1047 if (pvid_en_state_change &&
1048 hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1049 hns3_update_all_queues_pvid_proc_en(hw);
1055 hns3_default_vlan_config(struct hns3_adapter *hns)
1057 struct hns3_hw *hw = &hns->hw;
	 * When vlan filter is enabled, hardware regards packets without vlan
	 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
	 * table, packets without vlan won't be received. So, add vlan 0 as
	 * the default vlan.
1066 ret = hns3_vlan_filter_configure(hns, 0, 1);
1068 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
1073 hns3_init_vlan_config(struct hns3_adapter *hns)
1075 struct hns3_hw *hw = &hns->hw;
	 * This function can be called during both initialization and reset.
	 * In the reset case, the hardware has been reset successfully, and
	 * we need to restore the hardware configuration to ensure that it
	 * remains unchanged before and after the reset.
1085 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1086 hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
1087 hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
1090 ret = hns3_vlan_filter_init(hns);
1092 hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
1096 ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
1097 RTE_ETHER_TYPE_VLAN);
1099 hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
	 * In the reinit-dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at
	 * initialization; they will be restored to hardware later in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf.
1109 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1110 ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
1112 hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
1116 ret = hns3_en_hw_strip_rxvtag(hns, false);
1118 hns3_err(hw, "rx strip configure fail in pf, ret =%d",
1124 return hns3_default_vlan_config(hns);
1128 hns3_restore_vlan_conf(struct hns3_adapter *hns)
1130 struct hns3_pf *pf = &hns->pf;
1131 struct hns3_hw *hw = &hns->hw;
1136 if (!hw->data->promiscuous) {
1137 /* restore vlan filter states */
1138 offloads = hw->data->dev_conf.rxmode.offloads;
1139 enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
1140 ret = hns3_enable_vlan_filter(hns, enable);
1142 hns3_err(hw, "failed to restore vlan rx filter conf, "
1148 ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
1150 hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
1154 ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
1156 hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);
1162 hns3_dev_configure_vlan(struct rte_eth_dev *dev)
1164 struct hns3_adapter *hns = dev->data->dev_private;
1165 struct rte_eth_dev_data *data = dev->data;
1166 struct rte_eth_txmode *txmode;
1167 struct hns3_hw *hw = &hns->hw;
1171 txmode = &data->dev_conf.txmode;
1172 if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
1174 "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
1175 "configuration is not supported! Ignore these two "
1176 "parameters: hw_vlan_reject_tagged(%u), "
1177 "hw_vlan_reject_untagged(%u)",
1178 txmode->hw_vlan_reject_tagged,
1179 txmode->hw_vlan_reject_untagged);
1181 /* Apply vlan offload setting */
1182 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
1183 ret = hns3_vlan_offload_set(dev, mask);
1185 hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
	 * If pvid config is not set in rte_eth_conf, the driver need not
	 * write any VLAN pvid related configuration to hardware.
1194 if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
1197 /* Apply pvid setting */
1198 ret = hns3_vlan_pvid_set(dev, txmode->pvid,
1199 txmode->hw_vlan_insert_pvid);
1201 hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
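/*
 * Program the minimum and maximum TSO MSS values into hardware via the
 * TSO_GENERIC_CONFIG command.
 */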
1208 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
1209 unsigned int tso_mss_max)
1211 struct hns3_cfg_tso_status_cmd *req;
1212 struct hns3_cmd_desc desc;
1215 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
1217 req = (struct hns3_cfg_tso_status_cmd *)desc.data;
1220 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
1222 req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
1225 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
1227 req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
1229 return hns3_cmd_send(hw, &desc, 1);
1233 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
1234 uint16_t *allocated_size, bool is_alloc)
1236 struct hns3_umv_spc_alc_cmd *req;
1237 struct hns3_cmd_desc desc;
1240 req = (struct hns3_umv_spc_alc_cmd *)desc.data;
1241 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
1242 hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
1243 req->space_size = rte_cpu_to_le_32(space_size);
1245 ret = hns3_cmd_send(hw, &desc, 1);
1247 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
1248 is_alloc ? "allocate" : "free", ret);
1252 if (is_alloc && allocated_size)
1253 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
1259 hns3_init_umv_space(struct hns3_hw *hw)
1261 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1262 struct hns3_pf *pf = &hns->pf;
1263 uint16_t allocated_size = 0;
1266 ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
1271 if (allocated_size < pf->wanted_umv_size)
1272 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
1273 pf->wanted_umv_size, allocated_size);
1275 pf->max_umv_size = (!!allocated_size) ? allocated_size :
1276 pf->wanted_umv_size;
1277 pf->used_umv_size = 0;
1282 hns3_uninit_umv_space(struct hns3_hw *hw)
1284 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1285 struct hns3_pf *pf = &hns->pf;
1288 if (pf->max_umv_size == 0)
1291 ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
1295 pf->max_umv_size = 0;
1301 hns3_is_umv_space_full(struct hns3_hw *hw)
1303 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1304 struct hns3_pf *pf = &hns->pf;
1307 is_full = (pf->used_umv_size >= pf->max_umv_size);
1313 hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
1315 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1316 struct hns3_pf *pf = &hns->pf;
1319 if (pf->used_umv_size > 0)
1320 pf->used_umv_size--;
1322 pf->used_umv_size++;
1326 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
1327 const uint8_t *addr, bool is_mc)
1329 const unsigned char *mac_addr = addr;
1330 uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
1331 ((uint32_t)mac_addr[2] << 16) |
1332 ((uint32_t)mac_addr[1] << 8) |
1333 (uint32_t)mac_addr[0];
1334 uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
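	/*
	 * The table entry stores the address as two little-endian words
	 * built from bytes 0-3 and 4-5. For example, 00:11:22:33:44:55
	 * packs to high_val = 0x33221100 and low_val = 0x5544.
	 */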
1336 hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1338 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1339 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
1340 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1343 new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
1344 new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
1348 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
1350 enum hns3_mac_vlan_tbl_opcode op)
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status, status=%u",
1358 if (op == HNS3_MAC_VLAN_ADD) {
1359 if (resp_code == 0 || resp_code == 1) {
1361 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
1362 hns3_err(hw, "add mac addr failed for uc_overflow");
1364 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
1365 hns3_err(hw, "add mac addr failed for mc_overflow");
1369 hns3_err(hw, "add mac addr failed for undefined, code=%u",
1372 } else if (op == HNS3_MAC_VLAN_REMOVE) {
1373 if (resp_code == 0) {
1375 } else if (resp_code == 1) {
1376 hns3_dbg(hw, "remove mac addr failed for miss");
1380 hns3_err(hw, "remove mac addr failed for undefined, code=%u",
1383 } else if (op == HNS3_MAC_VLAN_LKUP) {
1384 if (resp_code == 0) {
1386 } else if (resp_code == 1) {
1387 hns3_dbg(hw, "lookup mac addr failed for miss");
1391 hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
1396 hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
1403 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
1404 struct hns3_mac_vlan_tbl_entry_cmd *req,
1405 struct hns3_cmd_desc *desc, uint8_t desc_num)
1412 if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
1413 for (i = 0; i < desc_num - 1; i++) {
1414 hns3_cmd_setup_basic_desc(&desc[i],
1415 HNS3_OPC_MAC_VLAN_ADD, true);
1416 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1418 memcpy(desc[i].data, req,
1419 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1421 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
1424 hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
1426 memcpy(desc[0].data, req,
1427 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1429 ret = hns3_cmd_send(hw, desc, desc_num);
1431 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
1435 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1436 retval = rte_le_to_cpu_16(desc[0].retval);
1438 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1439 HNS3_MAC_VLAN_LKUP);
1443 hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
1444 struct hns3_mac_vlan_tbl_entry_cmd *req,
1445 struct hns3_cmd_desc *desc, uint8_t desc_num)
1453 if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
1454 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
1455 memcpy(desc->data, req,
1456 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1457 ret = hns3_cmd_send(hw, desc, desc_num);
1458 resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
1459 retval = rte_le_to_cpu_16(desc->retval);
1461 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1464 for (i = 0; i < desc_num; i++) {
1465 hns3_cmd_reuse_desc(&desc[i], false);
1466 if (i == desc_num - 1)
1468 rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
1471 rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1473 memcpy(desc[0].data, req,
1474 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1476 ret = hns3_cmd_send(hw, desc, desc_num);
1477 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1478 retval = rte_le_to_cpu_16(desc[0].retval);
1480 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1485 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
1493 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1494 struct hns3_mac_vlan_tbl_entry_cmd *req)
1496 struct hns3_cmd_desc desc;
1501 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1503 memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1505 ret = hns3_cmd_send(hw, &desc, 1);
1507 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1510 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1511 retval = rte_le_to_cpu_16(desc.retval);
1513 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1514 HNS3_MAC_VLAN_REMOVE);
1518 hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1520 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1521 struct hns3_mac_vlan_tbl_entry_cmd req;
1522 struct hns3_pf *pf = &hns->pf;
1523 struct hns3_cmd_desc desc;
1524 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1525 uint16_t egress_port = 0;
1529 /* check if mac addr is valid */
1530 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1531 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1533 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
1538 memset(&req, 0, sizeof(req));
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so we only need to configure the PF vport.
1544 vf_id = HNS3_PF_FUNC_ID;
1545 hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
1546 HNS3_MAC_EPORT_VFID_S, vf_id);
1548 req.egress_port = rte_cpu_to_le_16(egress_port);
1550 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	 * Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed
	 * in the mac vlan table.
1557 ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
1558 HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
1559 if (ret == -ENOENT) {
1560 if (!hns3_is_umv_space_full(hw)) {
1561 ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
1562 HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
1564 hns3_update_umv_space(hw, false);
1568 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
1573 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
1575 /* check if we just hit the duplicate */
1577 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
1581 hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
1588 hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1590 struct hns3_mac_vlan_tbl_entry_cmd req;
1591 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1594 /* check if mac addr is valid */
1595 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1596 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1598 hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
1603 memset(&req, 0, sizeof(req));
1604 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1605 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1606 ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* the mac addr doesn't exist in the mac vlan table */
1610 hns3_update_umv_space(hw, true);
1616 hns3_set_default_mac_addr(struct rte_eth_dev *dev,
1617 struct rte_ether_addr *mac_addr)
1619 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1620 struct rte_ether_addr *oaddr;
1621 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1624 rte_spinlock_lock(&hw->lock);
1625 oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
1626 ret = hw->ops.del_uc_mac_addr(hw, oaddr);
1628 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1630 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
1633 rte_spinlock_unlock(&hw->lock);
1637 ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
1639 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1641 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
1642 goto err_add_uc_addr;
1645 ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
1647 hns3_err(hw, "Failed to configure mac pause address: %d", ret);
1648 goto err_pause_addr_cfg;
1651 rte_ether_addr_copy(mac_addr,
1652 (struct rte_ether_addr *)hw->mac.mac_addr);
1653 rte_spinlock_unlock(&hw->lock);
1658 ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
1660 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
			  "Failed to roll back the newly set mac addr(%s): %d",
1668 ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
1670 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
1671 hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
1674 rte_spinlock_unlock(&hw->lock);
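/*
 * Set or clear one VF's bit in the vfid bitmap of a multicast MAC table
 * entry. The bitmap spans two descriptors: desc[1] holds the first 192
 * vfids (six 32-bit words) and desc[2] holds the rest. For example, vfid
 * 200 maps to bit 200 % 32 = 8 of word (200 - 192) / 32 = 0 in desc[2].
 */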
1680 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1682 #define HNS3_VF_NUM_IN_FIRST_DESC 192
1686 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1687 word_num = vfid / 32;
1688 bit_num = vfid % 32;
1690 desc[1].data[word_num] &=
1691 rte_cpu_to_le_32(~(1UL << bit_num));
1693 desc[1].data[word_num] |=
1694 rte_cpu_to_le_32(1UL << bit_num);
1696 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
1697 bit_num = vfid % 32;
1699 desc[2].data[word_num] &=
1700 rte_cpu_to_le_32(~(1UL << bit_num));
1702 desc[2].data[word_num] |=
1703 rte_cpu_to_le_32(1UL << bit_num);
1708 hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1710 struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
1711 struct hns3_mac_vlan_tbl_entry_cmd req;
1712 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1716 /* Check if mac addr is valid */
1717 if (!rte_is_multicast_ether_addr(mac_addr)) {
1718 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1720 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
1725 memset(&req, 0, sizeof(req));
1726 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1727 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1728 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1729 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
		/* This mac addr does not exist, add a new entry for it */
1732 memset(desc[0].data, 0, sizeof(desc[0].data));
1733 memset(desc[1].data, 0, sizeof(desc[0].data));
1734 memset(desc[2].data, 0, sizeof(desc[0].data));
	 * In the current version, VF is not supported when the PF is driven
	 * by the DPDK driver, so we only need to configure the PF vport.
1741 vf_id = HNS3_PF_FUNC_ID;
1742 hns3_update_desc_vfid(desc, vf_id, false);
1743 ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
1744 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1747 hns3_err(hw, "mc mac vlan table is full");
1748 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1750 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
1757 hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1759 struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
1761 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1765 /* Check if mac addr is valid */
1766 if (!rte_is_multicast_ether_addr(mac_addr)) {
1767 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1769 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
1774 memset(&req, 0, sizeof(req));
1775 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1776 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1777 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1778 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
		 * This mac addr exists; remove this handle's VFID from it.
		 * In the current version, VF is not supported when the PF is
		 * driven by the DPDK driver, so we only need to configure the
		 * PF vport.
1785 vf_id = HNS3_PF_FUNC_ID;
1786 hns3_update_desc_vfid(desc, vf_id, true);
		/* All vfid bits are zero, so this entry needs to be deleted */
1789 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1790 } else if (ret == -ENOENT) {
1791 /* This mac addr doesn't exist. */
1796 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1798 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
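/*
 * Validate the multi-queue mode in the device configuration: VMDQ is not
 * supported, the requested number of TCs must not exceed the hardware
 * limit and must be 4 or 8, and the DCB TC mappings must be identical in
 * the Rx and Tx directions.
 */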
1805 hns3_check_mq_mode(struct rte_eth_dev *dev)
1807 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1808 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1809 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1810 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1811 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1812 struct rte_eth_dcb_tx_conf *dcb_tx_conf;
1817 if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
1818 (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
1819 tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
1820 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
1821 rx_mq_mode, tx_mq_mode);
1825 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1826 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
1827 if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
1828 if (dcb_rx_conf->nb_tcs > pf->tc_max) {
1829 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
1830 dcb_rx_conf->nb_tcs, pf->tc_max);
1834 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
1835 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
1836 hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
1837 "nb_tcs(%d) != %d or %d in rx direction.",
1838 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
1842 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
1843 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
1844 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
1848 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1849 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
			hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
				 "which is not equal to the one in tx direction.",
				 i, dcb_rx_conf->dcb_tc[i]);
1855 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1856 max_tc = dcb_rx_conf->dcb_tc[i];
1859 num_tc = max_tc + 1;
1860 if (num_tc > dcb_rx_conf->nb_tcs) {
1861 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
1862 num_tc, dcb_rx_conf->nb_tcs);
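/*
 * Map ("en" == true) or unmap a Tx/Rx ring to/from an interrupt vector
 * via the ADD/DEL_RING_TO_VECTOR command. The GL index tells hardware
 * which per-direction interrupt coalescing setting applies to the ring.
 */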
1871 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
1872 enum hns3_ring_type queue_type, uint16_t queue_id)
1874 struct hns3_cmd_desc desc;
1875 struct hns3_ctrl_vector_chain_cmd *req =
1876 (struct hns3_ctrl_vector_chain_cmd *)desc.data;
1877 enum hns3_opcode_type op;
1878 uint16_t tqp_type_and_id = 0;
1883 op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
1884 hns3_cmd_setup_basic_desc(&desc, op, false);
1885 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
1886 HNS3_TQP_INT_ID_L_S);
1887 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
1888 HNS3_TQP_INT_ID_H_S);
1890 if (queue_type == HNS3_RING_TYPE_RX)
1891 gl = HNS3_RING_GL_RX;
1893 gl = HNS3_RING_GL_TX;
1897 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
1899 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
1900 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
1902 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
1903 req->int_cause_num = 1;
1904 ret = hns3_cmd_send(hw, &desc, 1);
1906 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
1907 en ? "Map" : "Unmap", queue_id, vector_id, ret);
1915 hns3_setup_dcb(struct rte_eth_dev *dev)
1917 struct hns3_adapter *hns = dev->data->dev_private;
1918 struct hns3_hw *hw = &hns->hw;
1921 if (!hns3_dev_get_support(hw, DCB)) {
1922 hns3_err(hw, "this port does not support dcb configurations.");
1926 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
1927 hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
1931 ret = hns3_dcb_configure(hns);
1933 hns3_err(hw, "failed to config dcb: %d", ret);
1939 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
	 * Some hardware doesn't support auto-negotiation, but a user may
	 * not have configured link_speeds (default 0), which requests
	 * auto-negotiation. In this case, return success.
1948 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
1949 hw->mac.support_autoneg == 0)
1952 if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
1953 ret = hns3_check_port_speed(hw, link_speeds);
1962 hns3_check_dev_conf(struct rte_eth_dev *dev)
1964 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1965 struct rte_eth_conf *conf = &dev->data->dev_conf;
1968 ret = hns3_check_mq_mode(dev);
1972 return hns3_check_link_speed(hw, conf->link_speeds);
1976 hns3_dev_configure(struct rte_eth_dev *dev)
1978 struct hns3_adapter *hns = dev->data->dev_private;
1979 struct rte_eth_conf *conf = &dev->data->dev_conf;
1980 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
1981 struct hns3_hw *hw = &hns->hw;
1982 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1983 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1984 struct rte_eth_rss_conf rss_conf;
1988 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
	 * Some versions of the hardware network engine do not support
	 * individually enabling/disabling/resetting the Tx or Rx queue.
	 * These devices must enable/disable/reset Tx and Rx queues at the
	 * same time. When the number of Tx queues allocated by the upper
	 * application is not equal to the number of Rx queues, the driver
	 * sets up fake Tx or Rx queues to adjust the numbers; otherwise the
	 * network engine cannot work as usual. These fake queues are
	 * imperceptible to, and cannot be used by, upper applications.
2000 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
2002 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
2003 hw->cfg_max_queues = 0;
2007 hw->adapter_state = HNS3_NIC_CONFIGURING;
2008 ret = hns3_check_dev_conf(dev);
2012 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
2013 ret = hns3_setup_dcb(dev);
2018 /* When RSS is not configured, redirect the packet queue 0 */
2019 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
2020 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2021 rss_conf = conf->rx_adv_conf.rss_conf;
2022 hw->rss_dis_flag = false;
2023 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
2028 ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
2032 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
2036 ret = hns3_dev_configure_vlan(dev);
2040 /* config hardware GRO */
2041 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
2042 ret = hns3_config_gro(hw, gro_en);
2046 hns3_init_rx_ptype_tble(dev);
2047 hw->adapter_state = HNS3_NIC_CONFIGURED;
2052 hw->cfg_max_queues = 0;
2053 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
2054 hw->adapter_state = HNS3_NIC_INITIALIZED;
2060 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
2062 struct hns3_config_max_frm_size_cmd *req;
2063 struct hns3_cmd_desc desc;
2065 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
2067 req = (struct hns3_config_max_frm_size_cmd *)desc.data;
2068 req->max_frm_size = rte_cpu_to_le_16(new_mps);
2069 req->min_frm_size = RTE_ETHER_MIN_LEN;
2071 return hns3_cmd_send(hw, &desc, 1);
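/*
 * Set the MAC MTU and then reallocate the hardware packet buffers to
 * match. If buffer allocation fails, the previous MTU is restored.
 */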
2075 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
2077 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2081 ret = hns3_set_mac_mtu(hw, mps);
2083 hns3_err(hw, "failed to set mtu, ret = %d", ret);
2087 ret = hns3_buffer_alloc(hw);
2089 hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
2098 err = hns3_set_mac_mtu(hw, hns->pf.mps);
2100 hns3_err(hw, "fail to rollback MTU, err = %d", err);
2106 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2108 struct hns3_adapter *hns = dev->data->dev_private;
2109 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
2110 struct hns3_hw *hw = &hns->hw;
2113 if (dev->data->dev_started) {
2114 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
2115 "before configuration", dev->data->port_id);
2119 rte_spinlock_lock(&hw->lock);
2120 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
	 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
	 * safely be assigned to a "uint16_t" variable.
2126 ret = hns3_config_mtu(hw, (uint16_t)frame_size);
2128 rte_spinlock_unlock(&hw->lock);
2129 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
2130 dev->data->port_id, mtu, ret);
2134 rte_spinlock_unlock(&hw->lock);
2140 hns3_get_copper_port_speed_capa(uint32_t supported_speed)
2142 uint32_t speed_capa = 0;
2144 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
2145 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
2146 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
2147 speed_capa |= RTE_ETH_LINK_SPEED_10M;
2148 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
2149 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
2150 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
2151 speed_capa |= RTE_ETH_LINK_SPEED_100M;
2152 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
2153 speed_capa |= RTE_ETH_LINK_SPEED_1G;
hns3_get_fiber_port_speed_capa(uint32_t supported_speed)
2161 uint32_t speed_capa = 0;
2163 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
2164 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2165 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
2166 speed_capa |= RTE_ETH_LINK_SPEED_10G;
2167 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
2168 speed_capa |= RTE_ETH_LINK_SPEED_25G;
2169 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
2170 speed_capa |= RTE_ETH_LINK_SPEED_40G;
2171 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
2172 speed_capa |= RTE_ETH_LINK_SPEED_50G;
2173 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
2174 speed_capa |= RTE_ETH_LINK_SPEED_100G;
2175 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
2176 speed_capa |= RTE_ETH_LINK_SPEED_200G;
2182 hns3_get_speed_capa(struct hns3_hw *hw)
2184 struct hns3_mac *mac = &hw->mac;
2185 uint32_t speed_capa;
2187 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
2189 hns3_get_copper_port_speed_capa(mac->supported_speed);
			hns3_get_fiber_port_speed_capa(mac->supported_speed);
2194 if (mac->support_autoneg == 0)
2195 speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
2201 hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
2203 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2206 (void)hns3_update_link_status(hw);
2208 ret = hns3_update_link_info(eth_dev);
2210 hw->mac.link_status = RTE_ETH_LINK_DOWN;
2216 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
2217 struct rte_eth_link *new_link)
2219 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2220 struct hns3_mac *mac = &hw->mac;
2222 switch (mac->link_speed) {
2223 case RTE_ETH_SPEED_NUM_10M:
2224 case RTE_ETH_SPEED_NUM_100M:
2225 case RTE_ETH_SPEED_NUM_1G:
2226 case RTE_ETH_SPEED_NUM_10G:
2227 case RTE_ETH_SPEED_NUM_25G:
2228 case RTE_ETH_SPEED_NUM_40G:
2229 case RTE_ETH_SPEED_NUM_50G:
2230 case RTE_ETH_SPEED_NUM_100G:
2231 case RTE_ETH_SPEED_NUM_200G:
2232 if (mac->link_status)
2233 new_link->link_speed = mac->link_speed;
2236 if (mac->link_status)
2237 new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
2241 if (!mac->link_status)
2242 new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
2244 new_link->link_duplex = mac->link_duplex;
2245 new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
2246 new_link->link_autoneg = mac->link_autoneg;
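/*
 * Report the current link status. When "wait_to_complete" is set, poll
 * the link up to HNS3_MAX_LINK_CHECK_TIMES times at
 * HNS3_LINK_CHECK_INTERVAL millisecond intervals (2 seconds in total)
 * before reporting the result.
 */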
2250 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
2252 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */
#define HNS3_MAX_LINK_CHECK_TIMES 20	/* 2s (20 * 100ms) in total */
2255 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2256 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
2257 struct hns3_mac *mac = &hw->mac;
2258 struct rte_eth_link new_link;
2261 /* When port is stopped, report link down. */
2262 if (eth_dev->data->dev_started == 0) {
2263 new_link.link_autoneg = mac->link_autoneg;
2264 new_link.link_duplex = mac->link_duplex;
2265 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2266 new_link.link_status = RTE_ETH_LINK_DOWN;
2271 ret = hns3_update_port_link_info(eth_dev);
2273 hns3_err(hw, "failed to get port link info, ret = %d.",
2278 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
2281 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
2282 } while (retry_cnt--);
2284 memset(&new_link, 0, sizeof(new_link));
2285 hns3_setup_linkstatus(eth_dev, &new_link);
2288 return rte_eth_linkstatus_set(eth_dev, &new_link);
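/*
 * Caller-side note: rte_eth_link_get() reaches this callback with
 * wait_to_complete == 1, so a down link is polled for up to
 * HNS3_MAX_LINK_CHECK_TIMES * HNS3_LINK_CHECK_INTERVAL ms (2s) before the
 * result is reported, while rte_eth_link_get_nowait() passes 0 and returns
 * after a single query.
 */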
2292 hns3_dev_set_link_up(struct rte_eth_dev *dev)
2294 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 * The "tx_pkt_burst" will be restored, but the secondary process has no
 * mechanism for notifying the primary process to do so.
2301 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
hns3_err(hw, "secondary process does not support setting link up.");
 * If the device is not started, the Rx/Tx functions are still disabled
 * and setting link up is not allowed. But it is probably better to return
 * success to reduce the impact on the upper layer.
2311 if (hw->adapter_state != HNS3_NIC_STARTED) {
2312 hns3_info(hw, "device isn't started, can't set link up.");
2316 if (!hw->set_link_down)
2319 rte_spinlock_lock(&hw->lock);
2320 ret = hns3_cfg_mac_mode(hw, true);
2322 rte_spinlock_unlock(&hw->lock);
2323 hns3_err(hw, "failed to set link up, ret = %d", ret);
2327 hw->set_link_down = false;
2328 hns3_start_tx_datapath(dev);
2329 rte_spinlock_unlock(&hw->lock);
2335 hns3_dev_set_link_down(struct rte_eth_dev *dev)
2337 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 * The "tx_pkt_burst" will be set to a dummy function, but the secondary
 * process has no mechanism for notifying the primary process to do so.
2345 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
hns3_err(hw, "secondary process does not support setting link down.");
 * If the device is not started or this API has already been called, the
 * link is already down; return success.
2354 if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
2357 rte_spinlock_lock(&hw->lock);
2358 hns3_stop_tx_datapath(dev);
2359 ret = hns3_cfg_mac_mode(hw, false);
2361 hns3_start_tx_datapath(dev);
2362 rte_spinlock_unlock(&hw->lock);
2363 hns3_err(hw, "failed to set link down, ret = %d", ret);
2367 hw->set_link_down = true;
2368 rte_spinlock_unlock(&hw->lock);
2374 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2376 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2377 struct hns3_pf *pf = &hns->pf;
2379 if (!(status->pf_state & HNS3_PF_STATE_DONE))
2382 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2388 hns3_query_function_status(struct hns3_hw *hw)
2390 #define HNS3_QUERY_MAX_CNT 10
#define HNS3_QUERY_SLEEP_MS 1
2392 struct hns3_func_status_cmd *req;
2393 struct hns3_cmd_desc desc;
2397 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2398 req = (struct hns3_func_status_cmd *)desc.data;
2401 ret = hns3_cmd_send(hw, &desc, 1);
2403 PMD_INIT_LOG(ERR, "query function status failed %d",
/* Check whether PF reset is done */
rte_delay_ms(HNS3_QUERY_SLEEP_MS);
2413 } while (timeout++ < HNS3_QUERY_MAX_CNT);
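/*
 * Note: the loop above bounds the wait for PF reset completion to roughly
 * HNS3_QUERY_MAX_CNT * HNS3_QUERY_SLEEP_MS ms (about 10 ms) before the
 * last sampled state is parsed below.
 */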
2415 return hns3_parse_func_status(hw, req);
2419 hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
2421 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2422 struct hns3_pf *pf = &hns->pf;
2424 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
 * The total_tqps_num obtained from firmware is the maximum tqp
 * number of this port, to be shared by the PF and its VFs. In most
 * cases the PF does not need that many tqps.
 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, coming from the config file,
 * is the maximum queue number the user assigns to the PF of this
 * port, so users can tailor the PF queue count to their own
 * application scenarios. In addition, memory is saved because room
 * for queue statistics is allocated according to the number of
 * queues actually required. The maximum PF queue number for network
 * engines with revision_id greater than 0x30 is assigned by the
 * config file.
2439 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
2440 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
2441 "must be greater than 0.",
2442 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
2446 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
2447 hw->total_tqps_num);
2450 * Due to the limitation on the number of PF interrupts
2451 * available, the maximum queue number assigned to PF on
2452 * the network engine with revision_id 0x21 is 64.
2454 hw->tqps_num = RTE_MIN(hw->total_tqps_num,
2455 HNS3_MAX_TQP_NUM_HIP08_PF);
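/*
 * Worked example (illustrative numbers): with total_tqps_num = 1280
 * reported by firmware and RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF = 256 in the
 * build config, a flex-mode device gets tqps_num = min(256, 1280) = 256,
 * while a revision 0x21 device is capped at HNS3_MAX_TQP_NUM_HIP08_PF.
 */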
2462 hns3_query_pf_resource(struct hns3_hw *hw)
2464 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2465 struct hns3_pf *pf = &hns->pf;
2466 struct hns3_pf_res_cmd *req;
2467 struct hns3_cmd_desc desc;
2470 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2471 ret = hns3_cmd_send(hw, &desc, 1);
2473 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2477 req = (struct hns3_pf_res_cmd *)desc.data;
2478 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
2479 rte_le_to_cpu_16(req->ext_tqp_num);
2480 ret = hns3_get_pf_max_tqp_num(hw);
2484 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
2485 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
2487 if (req->tx_buf_size)
2489 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2491 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2493 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2495 if (req->dv_buf_size)
2497 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2499 pf->dv_buf_size = HNS3_DEFAULT_DV;
2501 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2504 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
2505 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
2511 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
2513 struct hns3_cfg_param_cmd *req;
2514 uint64_t mac_addr_tmp_high;
2515 uint8_t ext_rss_size_max;
2516 uint64_t mac_addr_tmp;
2519 req = (struct hns3_cfg_param_cmd *)desc[0].data;
2521 /* get the configuration */
2522 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2523 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
2525 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2526 HNS3_CFG_PHY_ADDR_M,
2527 HNS3_CFG_PHY_ADDR_S);
2528 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2529 HNS3_CFG_MEDIA_TP_M,
2530 HNS3_CFG_MEDIA_TP_S);
2531 /* get mac address */
2532 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
2533 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2534 HNS3_CFG_MAC_ADDR_H_M,
2535 HNS3_CFG_MAC_ADDR_H_S);
2537 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
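/*
 * The split "(x << 31) << 1" is equivalent to "x << 32"; keeping it in two
 * steps sidesteps shift-count warnings from tools that reason about 32-bit
 * operands. E.g. high bits 0xCCBB with low 32 bits 0xAA001122 compose to
 * 0x0000CCBBAA001122, emitted byte by byte in the loop below.
 */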
2539 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2540 HNS3_CFG_DEFAULT_SPEED_M,
2541 HNS3_CFG_DEFAULT_SPEED_S);
2542 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2543 HNS3_CFG_RSS_SIZE_M,
2544 HNS3_CFG_RSS_SIZE_S);
2546 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
2547 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
2549 req = (struct hns3_cfg_param_cmd *)desc[1].data;
2550 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
2552 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2553 HNS3_CFG_SPEED_ABILITY_M,
2554 HNS3_CFG_SPEED_ABILITY_S);
2555 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2556 HNS3_CFG_UMV_TBL_SPACE_M,
2557 HNS3_CFG_UMV_TBL_SPACE_S);
2558 if (!cfg->umv_space)
2559 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
2561 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
2562 HNS3_CFG_EXT_RSS_SIZE_M,
2563 HNS3_CFG_EXT_RSS_SIZE_S);
 * The ext_rss_size_max field obtained from firmware is an exponent of 2
 * rather than a value read out directly, which leaves room for future
 * changes and expansions. If this field is nonzero, the hns3 PF PMD uses
 * it as rss_size_max under one TC. Devices whose revision id is greater
 * than or equal to PCI_REVISION_ID_HIP09_A report the maximum number of
 * queues supported under a TC through this field.
2572 if (ext_rss_size_max)
2573 cfg->rss_size_max = 1U << ext_rss_size_max;
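/* E.g. ext_rss_size_max = 6 from firmware yields
 * rss_size_max = 1U << 6 = 64 queues per TC; a value of 0 keeps the
 * legacy rss_size_max read above.
 */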
/*
 * hns3_get_board_cfg: query the static parameters from the NCL_config file in flash
 * @hw: pointer to struct hns3_hw
 * @hcfg: the config structure to be filled
2581 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2583 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2584 struct hns3_cfg_param_cmd *req;
2589 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2591 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2592 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2594 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2595 i * HNS3_CFG_RD_LEN_BYTES);
/* Len should be divided by 4 when sent to hardware */
2597 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2598 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2599 req->offset = rte_cpu_to_le_32(offset);
2602 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2604 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2608 hns3_parse_cfg(hcfg, desc);
2614 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2616 switch (speed_cmd) {
2617 case HNS3_CFG_SPEED_10M:
2618 *speed = RTE_ETH_SPEED_NUM_10M;
2620 case HNS3_CFG_SPEED_100M:
2621 *speed = RTE_ETH_SPEED_NUM_100M;
2623 case HNS3_CFG_SPEED_1G:
2624 *speed = RTE_ETH_SPEED_NUM_1G;
2626 case HNS3_CFG_SPEED_10G:
2627 *speed = RTE_ETH_SPEED_NUM_10G;
2629 case HNS3_CFG_SPEED_25G:
2630 *speed = RTE_ETH_SPEED_NUM_25G;
2632 case HNS3_CFG_SPEED_40G:
2633 *speed = RTE_ETH_SPEED_NUM_40G;
2635 case HNS3_CFG_SPEED_50G:
2636 *speed = RTE_ETH_SPEED_NUM_50G;
2638 case HNS3_CFG_SPEED_100G:
2639 *speed = RTE_ETH_SPEED_NUM_100G;
2641 case HNS3_CFG_SPEED_200G:
2642 *speed = RTE_ETH_SPEED_NUM_200G;
2652 hns3_set_default_dev_specifications(struct hns3_hw *hw)
2654 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
2655 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
2656 hw->rss_key_size = HNS3_RSS_KEY_SIZE;
2657 hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
2658 hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
2662 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
2664 struct hns3_dev_specs_0_cmd *req0;
2666 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
2668 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
2669 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
2670 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
2671 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
2672 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
2676 hns3_check_dev_specifications(struct hns3_hw *hw)
2678 if (hw->rss_ind_tbl_size == 0 ||
2679 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
hns3_err(hw, "the size of the hash lookup table configured (%u)"
	 " must be nonzero and no more than the maximum (%u)",
	 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
2690 hns3_query_dev_specifications(struct hns3_hw *hw)
2692 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
2696 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2697 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
2699 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2701 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
2703 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
2707 hns3_parse_dev_specifications(hw, desc);
2709 return hns3_check_dev_specifications(hw);
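/*
 * Note on the multi-BD query above: every descriptor except the last sets
 * HNS3_CMD_FLAG_NEXT, which tells the firmware that the buffer descriptors
 * form one logical command. The same chaining pattern recurs in the
 * waterline and threshold configuration commands below.
 */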
2713 hns3_get_capability(struct hns3_hw *hw)
2715 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2716 struct rte_pci_device *pci_dev;
2717 struct hns3_pf *pf = &hns->pf;
2718 struct rte_eth_dev *eth_dev;
2722 eth_dev = &rte_eth_devices[hw->data->port_id];
2723 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2724 device_id = pci_dev->id.device_id;
2726 if (device_id == HNS3_DEV_ID_25GE_RDMA ||
2727 device_id == HNS3_DEV_ID_50GE_RDMA ||
2728 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
2729 device_id == HNS3_DEV_ID_200G_RDMA)
2730 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
2732 ret = hns3_get_pci_revision_id(hw, &hw->revision);
2736 ret = hns3_query_mac_stats_reg_num(hw);
2740 if (hw->revision < PCI_REVISION_ID_HIP09_A) {
2741 hns3_set_default_dev_specifications(hw);
2742 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
2743 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
2744 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
2745 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
2746 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
2747 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
2748 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
2749 hw->rss_info.ipv6_sctp_offload_supported = false;
2750 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
2751 pf->support_multi_tc_pause = false;
2755 ret = hns3_query_dev_specifications(hw);
2758 "failed to query dev specifications, ret = %d",
2763 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
2764 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
2765 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
2766 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
2767 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
2768 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
2769 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
2770 hw->rss_info.ipv6_sctp_offload_supported = true;
2771 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
2772 pf->support_multi_tc_pause = true;
2778 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
2782 switch (media_type) {
2783 case HNS3_MEDIA_TYPE_COPPER:
2784 if (!hns3_dev_get_support(hw, COPPER)) {
2786 "Media type is copper, not supported.");
2792 case HNS3_MEDIA_TYPE_FIBER:
2795 case HNS3_MEDIA_TYPE_BACKPLANE:
2796 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
2800 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
2809 hns3_get_board_configuration(struct hns3_hw *hw)
2811 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2812 struct hns3_pf *pf = &hns->pf;
2813 struct hns3_cfg cfg;
2816 ret = hns3_get_board_cfg(hw, &cfg);
2818 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
2822 ret = hns3_check_media_type(hw, cfg.media_type);
2826 hw->mac.media_type = cfg.media_type;
2827 hw->rss_size_max = cfg.rss_size_max;
2828 hw->rss_dis_flag = false;
2829 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
2830 hw->mac.phy_addr = cfg.phy_addr;
2831 hw->dcb_info.num_pg = 1;
2832 hw->dcb_info.hw_pfc_map = 0;
2834 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
PMD_INIT_LOG(ERR, "Got wrong speed %u from firmware, ret = %d",
2837 cfg.default_speed, ret);
2841 pf->tc_max = cfg.tc_num;
2842 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
2843 PMD_INIT_LOG(WARNING,
2844 "Get TC num(%u) from flash, set TC num to 1",
2849 /* Dev does not support DCB */
2850 if (!hns3_dev_get_support(hw, DCB)) {
2854 pf->pfc_max = pf->tc_max;
2856 hw->dcb_info.num_tc = 1;
2857 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
2858 hw->tqps_num / hw->dcb_info.num_tc);
2859 hns3_set_bit(hw->hw_tc_map, 0, 1);
2860 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
2862 pf->wanted_umv_size = cfg.umv_space;
2868 hns3_get_configuration(struct hns3_hw *hw)
2872 ret = hns3_query_function_status(hw);
2874 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
2878 /* Get device capability */
2879 ret = hns3_get_capability(hw);
2881 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
2885 /* Get pf resource */
2886 ret = hns3_query_pf_resource(hw);
2888 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
2892 ret = hns3_get_board_configuration(hw);
2894 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
2898 ret = hns3_query_dev_fec_info(hw);
2901 "failed to query FEC information, ret = %d", ret);
2907 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
2908 uint16_t tqp_vid, bool is_pf)
2910 struct hns3_tqp_map_cmd *req;
2911 struct hns3_cmd_desc desc;
2914 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
2916 req = (struct hns3_tqp_map_cmd *)desc.data;
2917 req->tqp_id = rte_cpu_to_le_16(tqp_pid);
2918 req->tqp_vf = func_id;
2919 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
2921 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
2922 req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
2924 ret = hns3_cmd_send(hw, &desc, 1);
2926 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
2932 hns3_map_tqp(struct hns3_hw *hw)
 * In the current version, VF is not supported when the PF is driven by
 * the DPDK driver, so all tqps allocated to this port are assigned to the PF.
2942 for (i = 0; i < hw->total_tqps_num; i++) {
2943 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
2952 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
2954 struct hns3_config_mac_speed_dup_cmd *req;
2955 struct hns3_cmd_desc desc;
2958 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
2960 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex);
2965 case RTE_ETH_SPEED_NUM_10M:
2966 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2967 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
2969 case RTE_ETH_SPEED_NUM_100M:
2970 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2971 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
2973 case RTE_ETH_SPEED_NUM_1G:
2974 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2975 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
2977 case RTE_ETH_SPEED_NUM_10G:
2978 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2979 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
2981 case RTE_ETH_SPEED_NUM_25G:
2982 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2983 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
2985 case RTE_ETH_SPEED_NUM_40G:
2986 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2987 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
2989 case RTE_ETH_SPEED_NUM_50G:
2990 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2991 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
2993 case RTE_ETH_SPEED_NUM_100G:
2994 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2995 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
2997 case RTE_ETH_SPEED_NUM_200G:
2998 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2999 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
3002 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
3006 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
3008 ret = hns3_cmd_send(hw, &desc, 1);
3010 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
3016 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3018 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3019 struct hns3_pf *pf = &hns->pf;
3020 struct hns3_priv_buf *priv;
3021 uint32_t i, total_size;
3023 total_size = pf->pkt_buf_size;
3025 /* alloc tx buffer for all enabled tc */
3026 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3027 priv = &buf_alloc->priv_buf[i];
3029 if (hw->hw_tc_map & BIT(i)) {
3030 if (total_size < pf->tx_buf_size)
3033 priv->tx_buf_size = pf->tx_buf_size;
3035 priv->tx_buf_size = 0;
3037 total_size -= priv->tx_buf_size;
3044 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
/* TX buffer size is allocated in units of 128 bytes */
3047 #define HNS3_BUF_SIZE_UNIT_SHIFT 7
3048 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15)
3049 struct hns3_tx_buff_alloc_cmd *req;
3050 struct hns3_cmd_desc desc;
3055 req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
3057 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
3058 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3059 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
3061 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
3062 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
3063 HNS3_BUF_SIZE_UPDATE_EN_MSK);
3066 ret = hns3_cmd_send(hw, &desc, 1);
3068 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
3074 hns3_get_tc_num(struct hns3_hw *hw)
3079 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3080 if (hw->hw_tc_map & BIT(i))
3086 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3088 struct hns3_priv_buf *priv;
3089 uint32_t rx_priv = 0;
3092 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3093 priv = &buf_alloc->priv_buf[i];
3095 rx_priv += priv->buf_size;
3101 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3103 uint32_t total_tx_size = 0;
3106 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3107 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
3109 return total_tx_size;
/* Get the number of PFC-enabled TCs that have a private buffer */
3114 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3116 struct hns3_priv_buf *priv;
3120 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3121 priv = &buf_alloc->priv_buf[i];
3122 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
/* Get the number of PFC-disabled TCs that have a private buffer */
3131 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
3132 struct hns3_pkt_buf_alloc *buf_alloc)
3134 struct hns3_priv_buf *priv;
3138 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3139 priv = &buf_alloc->priv_buf[i];
3140 if (hw->hw_tc_map & BIT(i) &&
3141 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3149 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
3152 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
3153 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3154 struct hns3_pf *pf = &hns->pf;
3155 uint32_t shared_buf, aligned_mps;
3160 tc_num = hns3_get_tc_num(hw);
3161 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3163 if (hns3_dev_get_support(hw, DCB))
3164 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
3167 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
3170 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
3171 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
3172 HNS3_BUF_SIZE_UNIT);
3174 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
3175 if (rx_all < rx_priv + shared_std)
3178 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
3179 buf_alloc->s_buf.buf_size = shared_buf;
3180 if (hns3_dev_get_support(hw, DCB)) {
3181 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
3182 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
3183 - roundup(aligned_mps / HNS3_BUF_DIV_BY,
3184 HNS3_BUF_SIZE_UNIT);
3186 buf_alloc->s_buf.self.high =
3187 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3188 buf_alloc->s_buf.self.low = aligned_mps;
3191 if (hns3_dev_get_support(hw, DCB)) {
3192 hi_thrd = shared_buf - pf->dv_buf_size;
3194 if (tc_num <= NEED_RESERVE_TC_NUM)
3195 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
3199 hi_thrd = hi_thrd / tc_num;
3201 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
3202 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
3203 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
3205 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3206 lo_thrd = aligned_mps;
3209 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3210 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
3211 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
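/*
 * Summary of the check above: the shared pool must hold at least
 * max(shared_buf_min, (tc_num + 1) * aligned_mps), rounded up to
 * HNS3_BUF_SIZE_UNIT, on top of the per-TC private buffers. Whatever is
 * left of rx_all then becomes the shared buffer, and the per-TC high/low
 * thresholds are derived from it.
 */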
3218 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
3219 struct hns3_pkt_buf_alloc *buf_alloc)
3221 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3222 struct hns3_pf *pf = &hns->pf;
3223 struct hns3_priv_buf *priv;
3224 uint32_t aligned_mps;
3228 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3229 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3231 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3232 priv = &buf_alloc->priv_buf[i];
3239 if (!(hw->hw_tc_map & BIT(i)))
3243 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
3244 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
3245 priv->wl.high = roundup(priv->wl.low + aligned_mps,
3246 HNS3_BUF_SIZE_UNIT);
3249 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
3253 priv->buf_size = priv->wl.high + pf->dv_buf_size;
3256 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3260 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
3261 struct hns3_pkt_buf_alloc *buf_alloc)
3263 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3264 struct hns3_pf *pf = &hns->pf;
3265 struct hns3_priv_buf *priv;
3266 int no_pfc_priv_num;
3271 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3272 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
/* Clear from the last TC backwards */
3275 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3276 priv = &buf_alloc->priv_buf[i];
3277 mask = BIT((uint8_t)i);
3278 if (hw->hw_tc_map & mask &&
3279 !(hw->dcb_info.hw_pfc_map & mask)) {
/* Clear the private buffer of this PFC-disabled TC */
3288 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3289 no_pfc_priv_num == 0)
3293 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3297 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
3298 struct hns3_pkt_buf_alloc *buf_alloc)
3300 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3301 struct hns3_pf *pf = &hns->pf;
3302 struct hns3_priv_buf *priv;
3308 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3309 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
/* Clear from the last TC backwards */
3312 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3313 priv = &buf_alloc->priv_buf[i];
3314 mask = BIT((uint8_t)i);
3315 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
/* Reduce the number of PFC-enabled TCs with a private buffer */
3323 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3328 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3332 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
3333 struct hns3_pkt_buf_alloc *buf_alloc)
3335 #define COMPENSATE_BUFFER 0x3C00
3336 #define COMPENSATE_HALF_MPS_NUM 5
3337 #define PRIV_WL_GAP 0x1800
3338 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3339 struct hns3_pf *pf = &hns->pf;
3340 uint32_t tc_num = hns3_get_tc_num(hw);
3341 uint32_t half_mps = pf->mps >> 1;
3342 struct hns3_priv_buf *priv;
3343 uint32_t min_rx_priv;
3347 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3349 rx_priv = rx_priv / tc_num;
3351 if (tc_num <= NEED_RESERVE_TC_NUM)
3352 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
 * The minimum Rx private buffer size (min_rx_priv) equals
 * "DV + 2.5 * MPS + 15KB". The driver only allocates an Rx private
 * buffer if rx_priv is at least min_rx_priv.
3359 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
3360 COMPENSATE_HALF_MPS_NUM * half_mps;
3361 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
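/*
 * Worked example (illustrative numbers): with dv_buf_size = 16384 and
 * mps = 2048 (half_mps = 1024), min_rx_priv = 16384 + 0x3C00 + 5 * 1024
 * = 36864 bytes, i.e. the "DV + 15KB + 2.5 * MPS" floor described above.
 */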
3362 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
3363 if (rx_priv < min_rx_priv)
3366 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3367 priv = &buf_alloc->priv_buf[i];
3373 if (!(hw->hw_tc_map & BIT(i)))
3377 priv->buf_size = rx_priv;
3378 priv->wl.high = rx_priv - pf->dv_buf_size;
3379 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
3382 buf_alloc->s_buf.buf_size = 0;
3388 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
3389 * @hw: pointer to struct hns3_hw
3390 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on success, negative on failure
3394 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3396 /* When DCB is not supported, rx private buffer is not allocated. */
3397 if (!hns3_dev_get_support(hw, DCB)) {
3398 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3399 struct hns3_pf *pf = &hns->pf;
3400 uint32_t rx_all = pf->pkt_buf_size;
3402 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
3403 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
 * Try to allocate private packet buffers for all TCs without a shared buffer.
3413 if (hns3_only_alloc_priv_buff(hw, buf_alloc))
 * Try to allocate private packet buffers for all TCs with a shared buffer.
3420 if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
 * The enabled port number, TC number and no-drop TC number differ
 * between application scenarios. To obtain better performance, the
 * software sizes the buffers and configures the waterlines by shrinking
 * the private buffer in stages: first the waterline of valid TCs, then
 * the PFC-disabled TCs, then the PFC-enabled ones.
3434 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
3437 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
3444 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3446 struct hns3_rx_priv_buff_cmd *req;
3447 struct hns3_cmd_desc desc;
3452 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
3453 req = (struct hns3_rx_priv_buff_cmd *)desc.data;
3455 /* Alloc private buffer TCs */
3456 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3457 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
3460 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
3461 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
3464 buf_size = buf_alloc->s_buf.buf_size;
3465 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
3466 (1 << HNS3_TC0_PRI_BUF_EN_B));
3468 ret = hns3_cmd_send(hw, &desc, 1);
3470 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
3476 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3478 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
3479 struct hns3_rx_priv_wl_buf *req;
3480 struct hns3_priv_buf *priv;
3481 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
3485 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
3486 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
3488 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
/* The first descriptor sets the NEXT bit to 1 */
3492 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3494 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3496 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3497 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
3499 priv = &buf_alloc->priv_buf[idx];
3500 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
3502 req->tc_wl[j].high |=
3503 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3504 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
3506 req->tc_wl[j].low |=
3507 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
/* Send 2 descriptors at one time */
3512 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
3514 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
3520 hns3_common_thrd_config(struct hns3_hw *hw,
3521 struct hns3_pkt_buf_alloc *buf_alloc)
3523 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3524 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3525 struct hns3_rx_com_thrd *req;
3526 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3527 struct hns3_tc_thrd *tc;
3532 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3533 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3535 req = (struct hns3_rx_com_thrd *)&desc[i].data;
/* The first descriptor sets the NEXT bit to 1 */
3539 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3541 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3543 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3544 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3545 tc = &s_buf->tc_thrd[tc_idx];
3547 req->com_thrd[j].high =
3548 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3549 req->com_thrd[j].high |=
3550 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3551 req->com_thrd[j].low =
3552 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3553 req->com_thrd[j].low |=
3554 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3558 /* Send 2 descriptors at one time */
3559 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3561 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3567 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3569 struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3570 struct hns3_rx_com_wl *req;
3571 struct hns3_cmd_desc desc;
3574 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3576 req = (struct hns3_rx_com_wl *)desc.data;
3577 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3578 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3580 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3581 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3583 ret = hns3_cmd_send(hw, &desc, 1);
3585 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3591 hns3_buffer_alloc(struct hns3_hw *hw)
3593 struct hns3_pkt_buf_alloc pkt_buf;
3596 memset(&pkt_buf, 0, sizeof(pkt_buf));
3597 ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3600 "could not calc tx buffer size for all TCs %d",
3605 ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3607 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3611 ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3614 "could not calc rx priv buffer size for all TCs %d",
3619 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3621 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3625 if (hns3_dev_get_support(hw, DCB)) {
3626 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3629 "could not configure rx private waterline %d",
3634 ret = hns3_common_thrd_config(hw, &pkt_buf);
3637 "could not configure common threshold %d",
3643 ret = hns3_common_wl_config(hw, &pkt_buf);
3645 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3652 hns3_mac_init(struct hns3_hw *hw)
3654 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3655 struct hns3_mac *mac = &hw->mac;
3656 struct hns3_pf *pf = &hns->pf;
3659 pf->support_sfp_query = true;
3660 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3661 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3663 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3667 mac->link_status = RTE_ETH_LINK_DOWN;
3669 return hns3_config_mtu(hw, pf->mps);
3673 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3675 #define HNS3_ETHERTYPE_SUCCESS_ADD 0
3676 #define HNS3_ETHERTYPE_ALREADY_ADD 1
3677 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2
3678 #define HNS3_ETHERTYPE_KEY_CONFLICT 3
3683 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
3688 switch (resp_code) {
3689 case HNS3_ETHERTYPE_SUCCESS_ADD:
3690 case HNS3_ETHERTYPE_ALREADY_ADD:
3693 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3695 "add mac ethertype failed for manager table overflow.");
3696 return_status = -EIO;
3698 case HNS3_ETHERTYPE_KEY_CONFLICT:
3699 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3700 return_status = -EIO;
3704 "add mac ethertype failed for undefined, code=%u.",
3706 return_status = -EIO;
3710 return return_status;
3714 hns3_add_mgr_tbl(struct hns3_hw *hw,
3715 const struct hns3_mac_mgr_tbl_entry_cmd *req)
3717 struct hns3_cmd_desc desc;
3722 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3723 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3725 ret = hns3_cmd_send(hw, &desc, 1);
3728 "add mac ethertype failed for cmd_send, ret =%d.",
3733 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3734 retval = rte_le_to_cpu_16(desc.retval);
3736 return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3740 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3741 int *table_item_num)
3743 struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3746 * In current version, we add one item in management table as below:
3747 * 0x0180C200000E -- LLDP MC address
3750 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3751 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3752 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3753 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
3754 tbl->i_port_bitmap = 0x1;
3755 *table_item_num = 1;
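/*
 * Layout note: the LLDP multicast address 01:80:C2:00:00:0E is split into
 * mac_addr_hi32 = 0x0180C200 and mac_addr_lo16 = 0x000E; htonl()/htons()
 * put the halves in network byte order before the little-endian
 * conversion expected by the command descriptor.
 */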
3759 hns3_init_mgr_tbl(struct hns3_hw *hw)
3761 #define HNS_MAC_MGR_TBL_MAX_SIZE 16
3762 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3767 memset(mgr_table, 0, sizeof(mgr_table));
3768 hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3769 for (i = 0; i < table_item_num; i++) {
3770 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3772 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3782 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3783 bool en_mc, bool en_bc, int vport_id)
3788 memset(param, 0, sizeof(struct hns3_promisc_param));
3790 param->enable = HNS3_PROMISC_EN_UC;
3792 param->enable |= HNS3_PROMISC_EN_MC;
3794 param->enable |= HNS3_PROMISC_EN_BC;
3795 param->vf_id = vport_id;
3799 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3801 struct hns3_promisc_cfg_cmd *req;
3802 struct hns3_cmd_desc desc;
3805 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3807 req = (struct hns3_promisc_cfg_cmd *)desc.data;
3808 req->vf_id = param->vf_id;
3809 req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3810 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3812 ret = hns3_cmd_send(hw, &desc, 1);
3814 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
3820 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3822 struct hns3_promisc_param param;
3823 bool en_bc_pmc = true;
 * In the current version VF is not supported when the PF is driven by
 * the DPDK driver, so only the PF vport needs to be configured.
3830 vf_id = HNS3_PF_FUNC_ID;
3832 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3833 return hns3_cmd_set_promisc_mode(hw, ¶m);
3837 hns3_promisc_init(struct hns3_hw *hw)
3839 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3840 struct hns3_pf *pf = &hns->pf;
3841 struct hns3_promisc_param param;
3845 ret = hns3_set_promisc_mode(hw, false, false);
3847 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
 * In the current version VFs are not supported when the PF is driven by
 * the DPDK driver. Once the PF has been taken over by DPDK, the original
 * VFs become invalid, so stale entries may remain. Clear the VFs'
 * promisc mode to avoid unnecessary bandwidth usage.
3858 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
3859 hns3_promisc_param_init(¶m, false, false, false, func_id);
3860 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
3862 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
3863 " ret = %d", func_id, ret);
3872 hns3_promisc_uninit(struct hns3_hw *hw)
3874 struct hns3_promisc_param param;
3878 func_id = HNS3_PF_FUNC_ID;
 * In the current version VFs are not supported when the PF is driven by
 * the DPDK driver; the VFs' promisc mode status has been cleared during
 * init and will not change. So just clear the PF's promisc mode status
 * during uninit.
3886 hns3_promisc_param_init(¶m, false, false, false, func_id);
3887 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
3889 PMD_INIT_LOG(ERR, "failed to clear promisc status during"
3890 " uninit, ret = %d", ret);
3894 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
3896 bool allmulti = dev->data->all_multicast ? true : false;
3897 struct hns3_adapter *hns = dev->data->dev_private;
3898 struct hns3_hw *hw = &hns->hw;
3903 rte_spinlock_lock(&hw->lock);
3904 ret = hns3_set_promisc_mode(hw, true, true);
3906 rte_spinlock_unlock(&hw->lock);
3907 hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
 * When promiscuous mode is enabled, disable the VLAN filter so that all
 * packets in the receiving direction are let in.
3916 offloads = dev->data->dev_conf.rxmode.offloads;
3917 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
3918 ret = hns3_enable_vlan_filter(hns, false);
3920 hns3_err(hw, "failed to enable promiscuous mode due to "
3921 "failure to disable vlan filter, ret = %d",
3923 err = hns3_set_promisc_mode(hw, false, allmulti);
3925 hns3_err(hw, "failed to restore promiscuous "
3926 "status after disable vlan filter "
3927 "failed during enabling promiscuous "
3928 "mode, ret = %d", ret);
3932 rte_spinlock_unlock(&hw->lock);
3938 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
3940 bool allmulti = dev->data->all_multicast ? true : false;
3941 struct hns3_adapter *hns = dev->data->dev_private;
3942 struct hns3_hw *hw = &hns->hw;
3947 /* If now in all_multicast mode, must remain in all_multicast mode. */
3948 rte_spinlock_lock(&hw->lock);
3949 ret = hns3_set_promisc_mode(hw, false, allmulti);
3951 rte_spinlock_unlock(&hw->lock);
3952 hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
/* When promiscuous mode is disabled, restore the VLAN filter status */
3957 offloads = dev->data->dev_conf.rxmode.offloads;
3958 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
3959 ret = hns3_enable_vlan_filter(hns, true);
3961 hns3_err(hw, "failed to disable promiscuous mode due to"
3962 " failure to restore vlan filter, ret = %d",
3964 err = hns3_set_promisc_mode(hw, true, true);
3966 hns3_err(hw, "failed to restore promiscuous "
3967 "status after enabling vlan filter "
3968 "failed during disabling promiscuous "
3969 "mode, ret = %d", ret);
3972 rte_spinlock_unlock(&hw->lock);
3978 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
3980 struct hns3_adapter *hns = dev->data->dev_private;
3981 struct hns3_hw *hw = &hns->hw;
3984 if (dev->data->promiscuous)
3987 rte_spinlock_lock(&hw->lock);
3988 ret = hns3_set_promisc_mode(hw, false, true);
3989 rte_spinlock_unlock(&hw->lock);
3991 hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
3998 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
4000 struct hns3_adapter *hns = dev->data->dev_private;
4001 struct hns3_hw *hw = &hns->hw;
/* If currently in promiscuous mode, the port must stay in all_multicast mode. */
4005 if (dev->data->promiscuous)
4008 rte_spinlock_lock(&hw->lock);
4009 ret = hns3_set_promisc_mode(hw, false, false);
4010 rte_spinlock_unlock(&hw->lock);
4012 hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4019 hns3_dev_promisc_restore(struct hns3_adapter *hns)
4021 struct hns3_hw *hw = &hns->hw;
4022 bool allmulti = hw->data->all_multicast ? true : false;
4025 if (hw->data->promiscuous) {
4026 ret = hns3_set_promisc_mode(hw, true, true);
4028 hns3_err(hw, "failed to restore promiscuous mode, "
4033 ret = hns3_set_promisc_mode(hw, false, allmulti);
4035 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4041 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4043 struct hns3_sfp_info_cmd *resp;
4044 struct hns3_cmd_desc desc;
4047 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4048 resp = (struct hns3_sfp_info_cmd *)desc.data;
4049 resp->query_type = HNS3_ACTIVE_QUERY;
4051 ret = hns3_cmd_send(hw, &desc, 1);
4052 if (ret == -EOPNOTSUPP) {
4053 hns3_warn(hw, "firmware does not support get SFP info,"
4057 hns3_err(hw, "get sfp info failed, ret = %d.", ret);
 * In some cases the MAC speed obtained from firmware may be 0; it
 * should not be assigned to mac->link_speed.
4065 if (!rte_le_to_cpu_32(resp->sfp_speed))
4068 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
 * If resp->supported_speed is 0, the firmware is an old version that
 * does not report it; do not update these parameters.
4073 if (resp->supported_speed) {
4074 mac_info->query_type = HNS3_ACTIVE_QUERY;
4075 mac_info->supported_speed =
4076 rte_le_to_cpu_32(resp->supported_speed);
4077 mac_info->support_autoneg = resp->autoneg_ability;
4078 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
4079 : RTE_ETH_LINK_AUTONEG;
4081 mac_info->query_type = HNS3_DEFAULT_QUERY;
4088 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4090 if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
4091 duplex = RTE_ETH_LINK_FULL_DUPLEX;
4097 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4099 struct hns3_mac *mac = &hw->mac;
4102 duplex = hns3_check_speed_dup(duplex, speed);
4103 if (mac->link_speed == speed && mac->link_duplex == duplex)
4106 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4110 ret = hns3_port_shaper_update(hw, speed);
4114 mac->link_speed = speed;
4115 mac->link_duplex = duplex;
4121 hns3_update_fiber_link_info(struct hns3_hw *hw)
4123 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4124 struct hns3_mac *mac = &hw->mac;
4125 struct hns3_mac mac_info;
/* If the firmware does not support getting the SFP/qSFP speed, return directly */
4129 if (!pf->support_sfp_query)
4132 memset(&mac_info, 0, sizeof(struct hns3_mac));
4133 ret = hns3_get_sfp_info(hw, &mac_info);
4134 if (ret == -EOPNOTSUPP) {
4135 pf->support_sfp_query = false;
4140 /* Do nothing if no SFP */
4141 if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
 * If query_type is HNS3_ACTIVE_QUERY, there is no need to reconfigure
 * the MAC speed. Otherwise, the current firmware only supports
 * querying the SFP speed, and the MAC speed needs to be reconfigured.
4150 mac->query_type = mac_info.query_type;
4151 if (mac->query_type == HNS3_ACTIVE_QUERY) {
4152 if (mac_info.link_speed != mac->link_speed) {
4153 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4158 mac->link_speed = mac_info.link_speed;
4159 mac->supported_speed = mac_info.supported_speed;
4160 mac->support_autoneg = mac_info.support_autoneg;
4161 mac->link_autoneg = mac_info.link_autoneg;
4166 /* Config full duplex for SFP */
4167 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
4168 RTE_ETH_LINK_FULL_DUPLEX);
4172 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4174 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f
4176 struct hns3_phy_params_bd0_cmd *req;
4179 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4180 mac->link_speed = rte_le_to_cpu_32(req->speed);
4181 mac->link_duplex = hns3_get_bit(req->duplex,
4182 HNS3_PHY_DUPLEX_CFG_B);
4183 mac->link_autoneg = hns3_get_bit(req->autoneg,
4184 HNS3_PHY_AUTONEG_CFG_B);
4185 mac->advertising = rte_le_to_cpu_32(req->advertising);
4186 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4187 supported = rte_le_to_cpu_32(req->supported);
4188 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4189 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4193 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4195 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4199 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4200 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4202 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4204 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4206 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4208 hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4212 hns3_parse_copper_phy_params(desc, mac);
4218 hns3_update_copper_link_info(struct hns3_hw *hw)
4220 struct hns3_mac *mac = &hw->mac;
4221 struct hns3_mac mac_info;
4224 memset(&mac_info, 0, sizeof(struct hns3_mac));
4225 ret = hns3_get_copper_phy_params(hw, &mac_info);
4229 if (mac_info.link_speed != mac->link_speed) {
4230 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4235 mac->link_speed = mac_info.link_speed;
4236 mac->link_duplex = mac_info.link_duplex;
4237 mac->link_autoneg = mac_info.link_autoneg;
4238 mac->supported_speed = mac_info.supported_speed;
4239 mac->advertising = mac_info.advertising;
4240 mac->lp_advertising = mac_info.lp_advertising;
4241 mac->support_autoneg = mac_info.support_autoneg;
4247 hns3_update_link_info(struct rte_eth_dev *eth_dev)
4249 struct hns3_adapter *hns = eth_dev->data->dev_private;
4250 struct hns3_hw *hw = &hns->hw;
4253 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4254 ret = hns3_update_copper_link_info(hw);
4255 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
4256 ret = hns3_update_fiber_link_info(hw);
4262 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
4264 struct hns3_config_mac_mode_cmd *req;
4265 struct hns3_cmd_desc desc;
4266 uint32_t loop_en = 0;
4270 req = (struct hns3_config_mac_mode_cmd *)desc.data;
4272 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
4275 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
4276 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
4277 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
4278 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
4279 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
4280 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
4281 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
4282 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
4283 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
4284 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
4287 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
4288 * when receiving frames. Otherwise, CRC will be stripped.
4290 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
4291 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
4293 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
4294 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
4295 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
4296 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
4297 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
4299 ret = hns3_cmd_send(hw, &desc, 1);
4301 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
4307 hns3_get_mac_link_status(struct hns3_hw *hw)
4309 struct hns3_link_status_cmd *req;
4310 struct hns3_cmd_desc desc;
4314 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4315 ret = hns3_cmd_send(hw, &desc, 1);
4317 hns3_err(hw, "get link status cmd failed %d", ret);
4318 return RTE_ETH_LINK_DOWN;
4321 req = (struct hns3_link_status_cmd *)desc.data;
4322 link_status = req->status & HNS3_LINK_STATUS_UP_M;
4324 return !!link_status;
4328 hns3_update_link_status(struct hns3_hw *hw)
4332 state = hns3_get_mac_link_status(hw);
4333 if (state != hw->mac.link_status) {
4334 hw->mac.link_status = state;
4335 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
4343 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query)
4345 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4346 struct rte_eth_link new_link;
4350 hns3_update_port_link_info(dev);
4352 memset(&new_link, 0, sizeof(new_link));
4353 hns3_setup_linkstatus(dev, &new_link);
4355 ret = rte_eth_linkstatus_set(dev, &new_link);
4356 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
4357 hns3_start_report_lse(dev);
4361 hns3_service_handler(void *param)
4363 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4364 struct hns3_adapter *hns = eth_dev->data->dev_private;
4365 struct hns3_hw *hw = &hns->hw;
4367 if (!hns3_is_reset_pending(hns))
4368 hns3_update_linkstatus_and_event(hw, true);
4370 hns3_warn(hw, "Cancel the query when reset is pending");
4372 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
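/*
 * The handler re-arms itself every HNS3_SERVICE_INTERVAL microseconds;
 * whoever starts it is expected to eventually stop it via
 * rte_eal_alarm_cancel(hns3_service_handler, eth_dev) (done elsewhere in
 * this driver during stop/uninit).
 */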
4376 hns3_init_hardware(struct hns3_adapter *hns)
4378 struct hns3_hw *hw = &hns->hw;
4382 * All queue-related HW operations must be performed after the TCAM
4383 * table is configured.
4385 ret = hns3_map_tqp(hw);
4387 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
4391 ret = hns3_init_umv_space(hw);
4393 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
4397 ret = hns3_mac_init(hw);
4399 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
4403 ret = hns3_init_mgr_tbl(hw);
4405 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
4409 ret = hns3_promisc_init(hw);
4411 PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
4416 ret = hns3_init_vlan_config(hns);
4418 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
4422 ret = hns3_dcb_init(hw);
4424 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
4428 ret = hns3_init_fd_config(hns);
4430 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
4434 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
4436 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
4440 ret = hns3_config_gro(hw, false);
4442 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
 * During initialization, all hardware mappings between queues and
 * interrupt vectors must be cleared, so that errors caused by residual
 * configurations, such as unexpected interrupts, can be avoided.
4452 ret = hns3_init_ring_with_vector(hw);
4454 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
4461 hns3_uninit_umv_space(hw);
4466 hns3_clear_hw(struct hns3_hw *hw)
4468 struct hns3_cmd_desc desc;
4471 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4473 ret = hns3_cmd_send(hw, &desc, 1);
4474 if (ret && ret != -EOPNOTSUPP)
4481 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
 * New firmware versions support reporting more hardware error types in
 * MSI-X mode. These errors are defined as RAS errors in hardware and
 * belong to a different type from the MSI-X errors processed by the
 * network driver.
 *
 * The network driver should enable this new error reporting during
 * initialization.
4493 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4494 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4495 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
hns3_set_fiber_default_support_speed(struct hns3_hw *hw)
4501 struct hns3_mac *mac = &hw->mac;
4503 switch (mac->link_speed) {
4504 case RTE_ETH_SPEED_NUM_1G:
4505 return HNS3_FIBER_LINK_SPEED_1G_BIT;
4506 case RTE_ETH_SPEED_NUM_10G:
4507 return HNS3_FIBER_LINK_SPEED_10G_BIT;
4508 case RTE_ETH_SPEED_NUM_25G:
4509 return HNS3_FIBER_LINK_SPEED_25G_BIT;
4510 case RTE_ETH_SPEED_NUM_40G:
4511 return HNS3_FIBER_LINK_SPEED_40G_BIT;
4512 case RTE_ETH_SPEED_NUM_50G:
4513 return HNS3_FIBER_LINK_SPEED_50G_BIT;
4514 case RTE_ETH_SPEED_NUM_100G:
4515 return HNS3_FIBER_LINK_SPEED_100G_BIT;
4516 case RTE_ETH_SPEED_NUM_200G:
4517 return HNS3_FIBER_LINK_SPEED_200G_BIT;
4519 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
 * Validity of supported_speed for fiber and copper media types is
 * guaranteed by the following policy:
 *
 * Although the initialization of the PHY in the firmware may not be
 * completed, the firmware can guarantee that supported_speed is valid.
 *
 * If the firmware version supports actively querying the
 * HNS3_OPC_GET_SFP_INFO opcode, supported_speed can be obtained through
 * it. If unsupported, the SFP's speed is used as the value of
 * supported_speed.
4538 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
4540 struct hns3_adapter *hns = eth_dev->data->dev_private;
4541 struct hns3_hw *hw = &hns->hw;
4542 struct hns3_mac *mac = &hw->mac;
4545 ret = hns3_update_link_info(eth_dev);
4549 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
 * Some firmware versions do not support reporting supported_speed and
 * only report the effective speed of the SFP. In this case, it is
 * necessary to use the SFP's speed as the supported_speed.
4555 if (mac->supported_speed == 0)
4556 mac->supported_speed =
hns3_set_fiber_default_support_speed(hw);
4564 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
4566 struct hns3_mac *mac = &hns->hw.mac;
4568 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
4569 hns->pf.support_fc_autoneg = true;
4574 * Flow control auto-negotiation requires the cooperation of the driver
4575 * and firmware. Currently, the optical port does not support flow
4576 * control auto-negotiation.
4578 hns->pf.support_fc_autoneg = false;
4582 hns3_init_pf(struct rte_eth_dev *eth_dev)
4584 struct rte_device *dev = eth_dev->device;
4585 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4586 struct hns3_adapter *hns = eth_dev->data->dev_private;
4587 struct hns3_hw *hw = &hns->hw;
4590 PMD_INIT_FUNC_TRACE();
4592 /* Get hardware io base address from pcie BAR2 IO space */
4593 hw->io_base = pci_dev->mem_resource[2].addr;
4595 /* Firmware command queue initialize */
4596 ret = hns3_cmd_init_queue(hw);
4598 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
4599 goto err_cmd_init_queue;
4602 hns3_clear_all_event_cause(hw);
4604 /* Firmware command initialize */
4605 ret = hns3_cmd_init(hw);
4607 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
4611 hns3_tx_push_init(eth_dev);
 * To ensure that the hardware environment is clean during
 * initialization, the driver actively clears it, including the PF's and
 * the corresponding VFs' vlan, mac and flow table configurations, etc.
4619 ret = hns3_clear_hw(hw);
4621 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
/* Clear the hardware imissed statistics registers. */
4626 ret = hns3_update_imissed_stats(hw, true);
4628 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
4632 hns3_config_all_msix_error(hw, true);
4634 ret = rte_intr_callback_register(pci_dev->intr_handle,
4635 hns3_interrupt_handler,
4638 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
4639 goto err_intr_callback_register;
4642 ret = hns3_ptp_init(hw);
4644 goto err_get_config;
4646 /* Enable interrupt */
4647 rte_intr_enable(pci_dev->intr_handle);
4648 hns3_pf_enable_irq0(hw);
4650 /* Get configuration */
4651 ret = hns3_get_configuration(hw);
4653 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
4654 goto err_get_config;
4657 ret = hns3_tqp_stats_init(hw);
4659 goto err_get_config;
4661 ret = hns3_init_hardware(hns);
4663 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
4667 /* Initialize flow director filter list & hash */
4668 ret = hns3_fdir_filter_init(hns);
4670 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
4674 hns3_rss_set_default_args(hw);
4676 ret = hns3_enable_hw_error_intr(hns, true);
4678 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
4680 goto err_enable_intr;
4683 ret = hns3_get_port_supported_speed(eth_dev);
4685 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
4686 "by device, ret = %d.", ret);
4687 goto err_supported_speed;
4690 hns3_get_fc_autoneg_capability(hns);
4692 hns3_tm_conf_init(eth_dev);
4696 err_supported_speed:
4697 (void)hns3_enable_hw_error_intr(hns, false);
4699 hns3_fdir_filter_uninit(hns);
4701 hns3_uninit_umv_space(hw);
4703 hns3_tqp_stats_uninit(hw);
4705 hns3_pf_disable_irq0(hw);
4706 rte_intr_disable(pci_dev->intr_handle);
4707 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4709 err_intr_callback_register:
4711 hns3_cmd_uninit(hw);
4712 hns3_cmd_destroy_queue(hw);
4720 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
4722 struct hns3_adapter *hns = eth_dev->data->dev_private;
4723 struct rte_device *dev = eth_dev->device;
4724 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4725 struct hns3_hw *hw = &hns->hw;
4727 PMD_INIT_FUNC_TRACE();
4729 hns3_tm_conf_uninit(eth_dev);
4730 hns3_enable_hw_error_intr(hns, false);
4731 hns3_rss_uninit(hns);
4732 (void)hns3_config_gro(hw, false);
4733 hns3_promisc_uninit(hw);
4734 hns3_flow_uninit(eth_dev);
4735 hns3_fdir_filter_uninit(hns);
4736 hns3_uninit_umv_space(hw);
4737 hns3_tqp_stats_uninit(hw);
4738 hns3_config_mac_tnl_int(hw, false);
4739 hns3_pf_disable_irq0(hw);
4740 rte_intr_disable(pci_dev->intr_handle);
4741 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4743 hns3_config_all_msix_error(hw, false);
4744 hns3_cmd_uninit(hw);
4745 hns3_cmd_destroy_queue(hw);
4750 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
4754 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4755 case RTE_ETH_LINK_SPEED_10M:
4756 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
4758 case RTE_ETH_LINK_SPEED_10M_HD:
4759 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
4761 case RTE_ETH_LINK_SPEED_100M:
4762 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
4764 case RTE_ETH_LINK_SPEED_100M_HD:
4765 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
4767 case RTE_ETH_LINK_SPEED_1G:
4768 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
4779 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
4783 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4784 case RTE_ETH_LINK_SPEED_1G:
4785 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
4787 case RTE_ETH_LINK_SPEED_10G:
4788 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
4790 case RTE_ETH_LINK_SPEED_25G:
4791 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
4793 case RTE_ETH_LINK_SPEED_40G:
4794 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
4796 case RTE_ETH_LINK_SPEED_50G:
4797 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
4799 case RTE_ETH_LINK_SPEED_100G:
4800 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
4802 case RTE_ETH_LINK_SPEED_200G:
4803 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
4814 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
4816 struct hns3_mac *mac = &hw->mac;
4817 uint32_t supported_speed = mac->supported_speed;
4818 uint32_t speed_bit = 0;
4820 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
4821 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
4822 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
4823 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
4825 if (!(speed_bit & supported_speed)) {
4826 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.",
4835 hns3_get_link_speed(uint32_t link_speeds)
4837 uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
4839 if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
4840 link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
4841 speed = RTE_ETH_SPEED_NUM_10M;
4842 if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
4843 link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
4844 speed = RTE_ETH_SPEED_NUM_100M;
4845 if (link_speeds & RTE_ETH_LINK_SPEED_1G)
4846 speed = RTE_ETH_SPEED_NUM_1G;
4847 if (link_speeds & RTE_ETH_LINK_SPEED_10G)
4848 speed = RTE_ETH_SPEED_NUM_10G;
4849 if (link_speeds & RTE_ETH_LINK_SPEED_25G)
4850 speed = RTE_ETH_SPEED_NUM_25G;
4851 if (link_speeds & RTE_ETH_LINK_SPEED_40G)
4852 speed = RTE_ETH_SPEED_NUM_40G;
4853 if (link_speeds & RTE_ETH_LINK_SPEED_50G)
4854 speed = RTE_ETH_SPEED_NUM_50G;
4855 if (link_speeds & RTE_ETH_LINK_SPEED_100G)
4856 speed = RTE_ETH_SPEED_NUM_100G;
4857 if (link_speeds & RTE_ETH_LINK_SPEED_200G)
4858 speed = RTE_ETH_SPEED_NUM_200G;
4864 hns3_get_link_duplex(uint32_t link_speeds)
4866 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
4867 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
4868 return RTE_ETH_LINK_HALF_DUPLEX;
4870 return RTE_ETH_LINK_FULL_DUPLEX;
4874 hns3_set_copper_port_link_speed(struct hns3_hw *hw,
4875 struct hns3_set_link_speed_cfg *cfg)
4877 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4878 struct hns3_phy_params_bd0_cmd *req;
4881 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4882 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4884 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4886 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
4887 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4888 req->autoneg = cfg->autoneg;
4891 * The full speed capability is used to negotiate when
4892 * auto-negotiation is enabled.
4895 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
4896 HNS3_PHY_LINK_SPEED_10M_HD_BIT |
4897 HNS3_PHY_LINK_SPEED_100M_BIT |
4898 HNS3_PHY_LINK_SPEED_100M_HD_BIT |
4899 HNS3_PHY_LINK_SPEED_1000M_BIT;
4901 req->speed = cfg->speed;
4902 req->duplex = cfg->duplex;
4905 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4909 hns3_set_autoneg(struct hns3_hw *hw, bool enable)
4911 struct hns3_config_auto_neg_cmd *req;
4912 struct hns3_cmd_desc desc;
4916 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);
4918 req = (struct hns3_config_auto_neg_cmd *)desc.data;
4920 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
4921 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);
4923 ret = hns3_cmd_send(hw, &desc, 1);
4925 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);
4931 hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
4932 struct hns3_set_link_speed_cfg *cfg)
4936 if (hw->mac.support_autoneg) {
4937 ret = hns3_set_autoneg(hw, cfg->autoneg);
4939 hns3_err(hw, "failed to configure auto-negotiation.");
4944  * To enable auto-negotiation, the driver only needs to turn on the
4945  * auto-negotiation switch; the firmware then sets all speed capabilities.
4953 * Some hardware doesn't support auto-negotiation, but users may not
4954 * configure link_speeds (default 0), which means auto-negotiation.
4955  * In this case, a warning message needs to be printed instead of returning an error.
4959 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
4963 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
4967 hns3_set_port_link_speed(struct hns3_hw *hw,
4968 struct hns3_set_link_speed_cfg *cfg)
4972 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
4973 #if defined(RTE_HNS3_ONLY_1630_FPGA)
4974 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4979 ret = hns3_set_copper_port_link_speed(hw, cfg);
4981 hns3_err(hw, "failed to set copper port link speed,"
4985 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
4986 ret = hns3_set_fiber_port_link_speed(hw, cfg);
4988 hns3_err(hw, "failed to set fiber port link speed,"
4998 hns3_apply_link_speed(struct hns3_hw *hw)
5000 struct rte_eth_conf *conf = &hw->data->dev_conf;
5001 struct hns3_set_link_speed_cfg cfg;
5003 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
5004 cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
5005 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
5006 if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
5007 cfg.speed = hns3_get_link_speed(conf->link_speeds);
5008 cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
5011 return hns3_set_port_link_speed(hw, &cfg);
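/*
 * Illustrative sketch (not taken from this driver): how an application
 * reaches the fixed-speed path handled by hns3_apply_link_speed() above.
 * This driver treats any link_speeds value other than
 * RTE_ETH_LINK_SPEED_AUTONEG as a fixed-speed request; the generic ethdev
 * convention is to also set RTE_ETH_LINK_SPEED_FIXED. "port_id" and the
 * 10G choice are assumptions; <rte_ethdev.h> and <string.h> are assumed
 * included.
 */
static int
example_configure_fixed_10g(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* 0 (RTE_ETH_LINK_SPEED_AUTONEG) would request auto-negotiation. */
	conf.link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_FIXED;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}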
5015 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
5017 struct hns3_hw *hw = &hns->hw;
5021 ret = hns3_update_queue_map_configure(hns);
5023 hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
5028 /* Note: hns3_tm_conf_update must be called after configuring DCB. */
5029 ret = hns3_tm_conf_update(hw);
5031 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
5035 hns3_enable_rxd_adv_layout(hw);
5037 ret = hns3_init_queues(hns, reset_queue);
5039 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
5043 link_en = !hw->set_link_down;
5044 ret = hns3_cfg_mac_mode(hw, link_en);
5046 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
5047 goto err_config_mac_mode;
5050 ret = hns3_apply_link_speed(hw);
5052 goto err_set_link_speed;
5057 (void)hns3_cfg_mac_mode(hw, false);
5059 err_config_mac_mode:
5060 hns3_dev_release_mbufs(hns);
5062  * This is the exception-handling path: hns3_reset_all_tqps() logs its
5063  * own error message if it fails, so there is no need to check its
5064  * return value here; keep ret as the error code that caused the
5065  * exception.
5067 (void)hns3_reset_all_tqps(hns);
5072 hns3_restore_filter(struct rte_eth_dev *dev)
5074 hns3_restore_rss_filter(dev);
5078 hns3_dev_start(struct rte_eth_dev *dev)
5080 struct hns3_adapter *hns = dev->data->dev_private;
5081 struct hns3_hw *hw = &hns->hw;
5082 bool old_state = hw->set_link_down;
5085 PMD_INIT_FUNC_TRACE();
5086 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
5089 rte_spinlock_lock(&hw->lock);
5090 hw->adapter_state = HNS3_NIC_STARTING;
5093 * If the dev_set_link_down() API has been called, the "set_link_down"
5094 * flag can be cleared by dev_start() API. In addition, the flag should
5095 * also be cleared before calling hns3_do_start() so that MAC can be
5096 * enabled in dev_start stage.
5098 hw->set_link_down = false;
5099 ret = hns3_do_start(hns, true);
5103 ret = hns3_map_rx_interrupt(dev);
5105 goto map_rx_inter_err;
5108  * There are three registers used to control the status of a TQP (which
5109  * contains a pair of Tx queue and Rx queue) in the new version network
5110  * engine. One controls the enabling of the Tx queue, another controls
5111  * the enabling of the Rx queue, and the last is the master switch that
5112  * controls the enabling of the whole TQP. The Tx register and the TQP
5113  * register must both be enabled to enable a Tx queue, and the same
5114  * applies to the Rx queue. For the older network engine, this function
5115  * only refreshes the enabled flag, which is used to update the queue
5116  * status in the DPDK framework.
5118 ret = hns3_start_all_txqs(dev);
5120 goto map_rx_inter_err;
5122 ret = hns3_start_all_rxqs(dev);
5124 goto start_all_rxqs_fail;
5126 hw->adapter_state = HNS3_NIC_STARTED;
5127 rte_spinlock_unlock(&hw->lock);
5129 hns3_rx_scattered_calc(dev);
5130 hns3_set_rxtx_function(dev);
5131 hns3_mp_req_start_rxtx(dev);
5133 hns3_restore_filter(dev);
5135 /* Enable interrupt of all rx queues before enabling queues */
5136 hns3_dev_all_rx_queue_intr_enable(hw, true);
5139  * After the initialization is finished, enable the tqps to
5140  * receive/transmit packets and refresh all queue statuses.
5142 hns3_start_tqps(hw);
5144 hns3_tm_dev_start_proc(hw);
5146 if (dev->data->dev_conf.intr_conf.lsc != 0)
5147 hns3_dev_link_update(dev, 0);
5148 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
5150 hns3_info(hw, "hns3 dev start successful!");
5154 start_all_rxqs_fail:
5155 hns3_stop_all_txqs(dev);
5157 (void)hns3_do_stop(hns);
5159 hw->set_link_down = old_state;
5160 hw->adapter_state = HNS3_NIC_CONFIGURED;
5161 rte_spinlock_unlock(&hw->lock);
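/*
 * Illustrative sketch (not taken from this driver): after
 * rte_eth_dev_start(), an application can toggle a single queue through
 * the generic ethdev API, which lands in the driver's
 * .rx_queue_start/.rx_queue_stop callbacks registered in
 * hns3_eth_dev_ops. "port_id" and queue 0 are assumptions for the
 * example.
 */
static int
example_restart_rx_queue0(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, 0);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, 0);
}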
5167 hns3_do_stop(struct hns3_adapter *hns)
5169 struct hns3_hw *hw = &hns->hw;
5173 * The "hns3_do_stop" function will also be called by .stop_service to
5174 * prepare reset. At the time of global or IMP reset, the command cannot
5175 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
5176 * accessed during the reset process. So the mbuf can not be released
5177 * during reset and is required to be released after the reset is
5180 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
5181 hns3_dev_release_mbufs(hns);
5183 ret = hns3_cfg_mac_mode(hw, false);
5186 hw->mac.link_status = RTE_ETH_LINK_DOWN;
5188 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
5189 hns3_configure_all_mac_addr(hns, true);
5190 ret = hns3_reset_all_tqps(hns);
5192 hns3_err(hw, "failed to reset all queues ret = %d.",
5202 hns3_dev_stop(struct rte_eth_dev *dev)
5204 struct hns3_adapter *hns = dev->data->dev_private;
5205 struct hns3_hw *hw = &hns->hw;
5207 PMD_INIT_FUNC_TRACE();
5208 dev->data->dev_started = 0;
5210 hw->adapter_state = HNS3_NIC_STOPPING;
5211 hns3_set_rxtx_function(dev);
5213 /* Disable datapath on secondary process. */
5214 hns3_mp_req_stop_rxtx(dev);
5215 /* Prevent crashes when queues are still in use. */
5216 rte_delay_ms(hw->cfg_max_queues);
5218 rte_spinlock_lock(&hw->lock);
5219 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
5220 hns3_tm_dev_stop_proc(hw);
5221 hns3_config_mac_tnl_int(hw, false);
5224 hns3_unmap_rx_interrupt(dev);
5225 hw->adapter_state = HNS3_NIC_CONFIGURED;
5227 hns3_rx_scattered_reset(dev);
5228 rte_eal_alarm_cancel(hns3_service_handler, dev);
5229 hns3_stop_report_lse(dev);
5230 rte_spinlock_unlock(&hw->lock);
5236 hns3_dev_close(struct rte_eth_dev *eth_dev)
5238 struct hns3_adapter *hns = eth_dev->data->dev_private;
5239 struct hns3_hw *hw = &hns->hw;
5242 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
5243 hns3_mp_uninit(eth_dev);
5247 if (hw->adapter_state == HNS3_NIC_STARTED)
5248 ret = hns3_dev_stop(eth_dev);
5250 hw->adapter_state = HNS3_NIC_CLOSING;
5251 hns3_reset_abort(hns);
5252 hw->adapter_state = HNS3_NIC_CLOSED;
5254 hns3_configure_all_mc_mac_addr(hns, true);
5255 hns3_remove_all_vlan_table(hns);
5256 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
5257 hns3_uninit_pf(eth_dev);
5258 hns3_free_all_queues(eth_dev);
5259 rte_free(hw->reset.wait_data);
5260 hns3_mp_uninit(eth_dev);
5261 hns3_warn(hw, "Close port %u finished", hw->data->port_id);
5267 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
5270 struct hns3_mac *mac = &hw->mac;
5271 uint32_t advertising = mac->advertising;
5272 uint32_t lp_advertising = mac->lp_advertising;
5276 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
5279 } else if (advertising & lp_advertising &
5280 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
5281 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5283 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5288 static enum hns3_fc_mode
5289 hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
5291 enum hns3_fc_mode current_mode;
5292 bool rx_pause = false;
5293 bool tx_pause = false;
5295 switch (hw->mac.media_type) {
5296 case HNS3_MEDIA_TYPE_COPPER:
5297 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
5301 * Flow control auto-negotiation is not supported for fiber and
5302  * backplane media types.
5304 case HNS3_MEDIA_TYPE_FIBER:
5305 case HNS3_MEDIA_TYPE_BACKPLANE:
5306 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
5307 current_mode = hw->requested_fc_mode;
5310 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
5311 hw->mac.media_type);
5312 current_mode = HNS3_FC_NONE;
5316 if (rx_pause && tx_pause)
5317 current_mode = HNS3_FC_FULL;
5319 current_mode = HNS3_FC_RX_PAUSE;
5321 current_mode = HNS3_FC_TX_PAUSE;
5323 current_mode = HNS3_FC_NONE;
5326 return current_mode;
5329 static enum hns3_fc_mode
5330 hns3_get_current_fc_mode(struct rte_eth_dev *dev)
5332 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5333 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5334 struct hns3_mac *mac = &hw->mac;
5337  * When the flow control mode is queried, the device may not have
5338  * completed auto-negotiation. It is necessary to wait for link establishment.
5340 (void)hns3_dev_link_update(dev, 1);
5343 * If the link auto-negotiation of the nic is disabled, or the flow
5344  * control auto-negotiation is not supported, the forced flow control mode is used.
5347 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
5348 return hw->requested_fc_mode;
5350 return hns3_get_autoneg_fc_mode(hw);
5354 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5356 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5357 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5358 enum hns3_fc_mode current_mode;
5360 current_mode = hns3_get_current_fc_mode(dev);
5361 switch (current_mode) {
5363 fc_conf->mode = RTE_ETH_FC_FULL;
5365 case HNS3_FC_TX_PAUSE:
5366 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
5368 case HNS3_FC_RX_PAUSE:
5369 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
5373 fc_conf->mode = RTE_ETH_FC_NONE;
5377 fc_conf->pause_time = pf->pause_time;
5378 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
5384 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
5386 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5388 if (!pf->support_fc_autoneg) {
5390 hns3_err(hw, "unsupported fc auto-negotiation setting.");
5395 * Flow control auto-negotiation of the NIC is not supported,
5396 * but other auto-negotiation features may be supported.
5398 if (autoneg != hw->mac.link_autoneg) {
5399 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
5407 * If flow control auto-negotiation of the NIC is supported, all
5408 * auto-negotiation features are supported.
5410 if (autoneg != hw->mac.link_autoneg) {
5411 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
5419 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5421 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5422 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5425 if (fc_conf->high_water || fc_conf->low_water ||
5426 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
5427 hns3_err(hw, "Unsupported flow control settings specified, "
5428 "high_water(%u), low_water(%u), send_xon(%u) and "
5429 "mac_ctrl_frame_fwd(%u) must be set to '0'",
5430 fc_conf->high_water, fc_conf->low_water,
5431 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
5435 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
5439 if (!fc_conf->pause_time) {
5440 hns3_err(hw, "Invalid pause time %u setting.",
5441 fc_conf->pause_time);
5445 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5446 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
5447 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
5448 "current_fc_status = %d", hw->current_fc_status);
5452 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
5453 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
5457 rte_spinlock_lock(&hw->lock);
5458 ret = hns3_fc_enable(dev, fc_conf);
5459 rte_spinlock_unlock(&hw->lock);
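/*
 * Illustrative sketch (not taken from this driver): enabling MAC pause
 * from an application via the generic API backed by hns3_flow_ctrl_set()
 * above. high_water/low_water/send_xon/mac_ctrl_frame_fwd stay zero, as
 * the checks above require. "port_id" and the pause time are assumptions;
 * <string.h> is assumed included.
 */
static int
example_enable_mac_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = RTE_ETH_FC_FULL;	/* generate and honor pause frames */
	fc_conf.pause_time = 0xffff;	/* must be non-zero for this driver */
	fc_conf.autoneg = 0;		/* forced flow control */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}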
5465 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
5466 struct rte_eth_pfc_conf *pfc_conf)
5468 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5471 if (!hns3_dev_get_support(hw, DCB)) {
5472 hns3_err(hw, "This port does not support dcb configurations.");
5476 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
5477 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
5478 hns3_err(hw, "Unsupported flow control settings specified, "
5479 "high_water(%u), low_water(%u), send_xon(%u) and "
5480 "mac_ctrl_frame_fwd(%u) must be set to '0'",
5481 pfc_conf->fc.high_water, pfc_conf->fc.low_water,
5482 pfc_conf->fc.send_xon,
5483 pfc_conf->fc.mac_ctrl_frame_fwd);
5486 if (pfc_conf->fc.autoneg) {
5487 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
5490 if (pfc_conf->fc.pause_time == 0) {
5491 hns3_err(hw, "Invalid pause time %u setting.",
5492 pfc_conf->fc.pause_time);
5496 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5497 hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
5498 hns3_err(hw, "MAC pause is enabled. Cannot set PFC."
5499 "current_fc_status = %d", hw->current_fc_status);
5503 rte_spinlock_lock(&hw->lock);
5504 ret = hns3_dcb_pfc_enable(dev, pfc_conf);
5505 rte_spinlock_unlock(&hw->lock);
5511 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
5513 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5514 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5515 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
5518 rte_spinlock_lock(&hw->lock);
5519 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
5520 dcb_info->nb_tcs = pf->local_max_tc;
5522 dcb_info->nb_tcs = 1;
5524 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
5525 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
5526 for (i = 0; i < dcb_info->nb_tcs; i++)
5527 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
5529 for (i = 0; i < hw->num_tc; i++) {
5530 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
5531 dcb_info->tc_queue.tc_txq[0][i].base =
5532 hw->tc_queue[i].tqp_offset;
5533 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
5534 dcb_info->tc_queue.tc_txq[0][i].nb_queue =
5535 hw->tc_queue[i].tqp_count;
5537 rte_spinlock_unlock(&hw->lock);
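/*
 * Illustrative sketch (not taken from this driver): reading back the TC to
 * queue mapping filled in by hns3_get_dcb_info() above through the generic
 * ethdev API. "port_id" is an assumption; <stdio.h> is assumed included.
 */
static void
example_dump_dcb_mapping(uint16_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	int i;

	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) != 0)
		return;
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("TC%d: rxq base %u, %u queue(s)\n", i,
		       dcb_info.tc_queue.tc_rxq[0][i].base,
		       dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
}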
5543 hns3_reinit_dev(struct hns3_adapter *hns)
5545 struct hns3_hw *hw = &hns->hw;
5548 ret = hns3_cmd_init(hw);
5550 hns3_err(hw, "Failed to init cmd: %d", ret);
5554 ret = hns3_init_hardware(hns);
5556 hns3_err(hw, "Failed to init hardware: %d", ret);
5560 ret = hns3_reset_all_tqps(hns);
5562 hns3_err(hw, "Failed to reset all queues: %d", ret);
5566 ret = hns3_enable_hw_error_intr(hns, true);
5568 hns3_err(hw, "fail to enable hw error interrupts: %d",
5572 hns3_info(hw, "Reset done, driver initialization finished.");
5578 is_pf_reset_done(struct hns3_hw *hw)
5580 uint32_t val, reg, reg_bit;
5582 switch (hw->reset.level) {
5583 case HNS3_IMP_RESET:
5584 reg = HNS3_GLOBAL_RESET_REG;
5585 reg_bit = HNS3_IMP_RESET_BIT;
5587 case HNS3_GLOBAL_RESET:
5588 reg = HNS3_GLOBAL_RESET_REG;
5589 reg_bit = HNS3_GLOBAL_RESET_BIT;
5591 case HNS3_FUNC_RESET:
5592 reg = HNS3_FUN_RST_ING;
5593 reg_bit = HNS3_FUN_RST_ING_B;
5595 case HNS3_FLR_RESET:
5597 hns3_err(hw, "Wait for unsupported reset level: %d",
5601 val = hns3_read_dev(hw, reg);
5602 if (hns3_get_bit(val, reg_bit))
5609 hns3_is_reset_pending(struct hns3_adapter *hns)
5611 struct hns3_hw *hw = &hns->hw;
5612 enum hns3_reset_level reset;
5614 hns3_check_event_cause(hns, NULL);
5615 reset = hns3_get_reset_level(hns, &hw->reset.pending);
5616 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5617 hw->reset.level < reset) {
5618 hns3_warn(hw, "High level reset %d is pending", reset);
5621 reset = hns3_get_reset_level(hns, &hw->reset.request);
5622 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5623 hw->reset.level < reset) {
5624 hns3_warn(hw, "High level reset %d is request", reset);
5631 hns3_wait_hardware_ready(struct hns3_adapter *hns)
5633 struct hns3_hw *hw = &hns->hw;
5634 struct hns3_wait_data *wait_data = hw->reset.wait_data;
5637 if (wait_data->result == HNS3_WAIT_SUCCESS)
5639 else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
5640 hns3_clock_gettime(&tv);
5641 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
5642 tv.tv_sec, tv.tv_usec);
5644 } else if (wait_data->result == HNS3_WAIT_REQUEST)
5647 wait_data->hns = hns;
5648 wait_data->check_completion = is_pf_reset_done;
5649 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
5650 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
5651 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
5652 wait_data->count = HNS3_RESET_WAIT_CNT;
5653 wait_data->result = HNS3_WAIT_REQUEST;
5654 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
5659 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
5661 struct hns3_cmd_desc desc;
5662 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
5664 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
5665 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
5666 req->fun_reset_vfid = func_id;
5668 return hns3_cmd_send(hw, &desc, 1);
5672 hns3_imp_reset_cmd(struct hns3_hw *hw)
5674 struct hns3_cmd_desc desc;
5676 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
5677 desc.data[0] = 0xeedd;
5679 return hns3_cmd_send(hw, &desc, 1);
5683 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
5685 struct hns3_hw *hw = &hns->hw;
5689 hns3_clock_gettime(&tv);
5690 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
5691 hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
5692 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
5693 tv.tv_sec, tv.tv_usec);
5697 switch (reset_level) {
5698 case HNS3_IMP_RESET:
5699 hns3_imp_reset_cmd(hw);
5700 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
5701 tv.tv_sec, tv.tv_usec);
5703 case HNS3_GLOBAL_RESET:
5704 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
5705 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
5706 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
5707 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
5708 tv.tv_sec, tv.tv_usec);
5710 case HNS3_FUNC_RESET:
5711 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
5712 tv.tv_sec, tv.tv_usec);
5713 /* schedule again to check later */
5714 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
5715 hns3_schedule_reset(hns);
5718 hns3_warn(hw, "Unsupported reset level: %d", reset_level);
5721 hns3_atomic_clear_bit(reset_level, &hw->reset.request);
5724 static enum hns3_reset_level
5725 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
5727 struct hns3_hw *hw = &hns->hw;
5728 enum hns3_reset_level reset_level = HNS3_NONE_RESET;
5730 /* Return the highest priority reset level amongst all */
5731 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
5732 reset_level = HNS3_IMP_RESET;
5733 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
5734 reset_level = HNS3_GLOBAL_RESET;
5735 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
5736 reset_level = HNS3_FUNC_RESET;
5737 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
5738 reset_level = HNS3_FLR_RESET;
5740 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
5741 return HNS3_NONE_RESET;
5747 hns3_record_imp_error(struct hns3_adapter *hns)
5749 struct hns3_hw *hw = &hns->hw;
5752 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5753 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
5754 hns3_warn(hw, "Detected IMP RD poison!");
5755 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
5756 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
5759 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
5760 hns3_warn(hw, "Detected IMP CMDQ error!");
5761 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
5762 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
5767 hns3_prepare_reset(struct hns3_adapter *hns)
5769 struct hns3_hw *hw = &hns->hw;
5773 switch (hw->reset.level) {
5774 case HNS3_FUNC_RESET:
5775 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
5780  * After performing PF reset, it is not necessary to do the
5781 * mailbox handling or send any command to firmware, because
5782 * any mailbox handling or command to firmware is only valid
5783 * after hns3_cmd_init is called.
5785 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
5786 hw->reset.stats.request_cnt++;
5788 case HNS3_IMP_RESET:
5789 hns3_record_imp_error(hns);
5790 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5791 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
5792 BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
5801 hns3_set_rst_done(struct hns3_hw *hw)
5803 struct hns3_pf_rst_done_cmd *req;
5804 struct hns3_cmd_desc desc;
5806 req = (struct hns3_pf_rst_done_cmd *)desc.data;
5807 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
5808 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
5809 return hns3_cmd_send(hw, &desc, 1);
5813 hns3_stop_service(struct hns3_adapter *hns)
5815 struct hns3_hw *hw = &hns->hw;
5816 struct rte_eth_dev *eth_dev;
5818 eth_dev = &rte_eth_devices[hw->data->port_id];
5819 hw->mac.link_status = RTE_ETH_LINK_DOWN;
5820 if (hw->adapter_state == HNS3_NIC_STARTED) {
5821 rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
5822 hns3_update_linkstatus_and_event(hw, false);
5825 hns3_set_rxtx_function(eth_dev);
5827 /* Disable datapath on secondary process. */
5828 hns3_mp_req_stop_rxtx(eth_dev);
5829 rte_delay_ms(hw->cfg_max_queues);
5831 rte_spinlock_lock(&hw->lock);
5832 if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
5833 hw->adapter_state == HNS3_NIC_STOPPING) {
5834 hns3_enable_all_queues(hw, false);
5836 hw->reset.mbuf_deferred_free = true;
5838 hw->reset.mbuf_deferred_free = false;
5841 * It is cumbersome for hardware to pick-and-choose entries for deletion
5842  * from table space. Hence, for function reset, software intervention is
5843  * required to delete the entries.
5845 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
5846 hns3_configure_all_mc_mac_addr(hns, true);
5847 rte_spinlock_unlock(&hw->lock);
5853 hns3_start_service(struct hns3_adapter *hns)
5855 struct hns3_hw *hw = &hns->hw;
5856 struct rte_eth_dev *eth_dev;
5858 if (hw->reset.level == HNS3_IMP_RESET ||
5859 hw->reset.level == HNS3_GLOBAL_RESET)
5860 hns3_set_rst_done(hw);
5861 eth_dev = &rte_eth_devices[hw->data->port_id];
5862 hns3_set_rxtx_function(eth_dev);
5863 hns3_mp_req_start_rxtx(eth_dev);
5864 if (hw->adapter_state == HNS3_NIC_STARTED) {
5866  * The parent function of this API already holds the hns3_hw.lock.
5867  * hns3_service_handler may report an LSC event, and in a bonding
5868  * application this calls back into the driver's ops, which may acquire
5869  * the hns3_hw.lock again and thus lead to deadlock.
5870  * Defer the call to hns3_service_handler to avoid the deadlock.
5872 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
5873 hns3_service_handler, eth_dev);
5875 /* Enable interrupt of all rx queues before enabling queues */
5876 hns3_dev_all_rx_queue_intr_enable(hw, true);
5878  * The enable state of each rxq and txq needs to be recovered after
5879  * reset, so restore them before enabling all tqps.
5881 hns3_restore_tqp_enable_state(hw);
5883  * When the initialization is finished, enable the queues to receive
5884 * and transmit packets.
5886 hns3_enable_all_queues(hw, true);
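/*
 * Illustrative sketch (not taken from this driver): the deferral pattern
 * used above. Work that must not run while the caller holds a lock is
 * pushed to an EAL alarm and executed later from the interrupt thread.
 * The names and the 10 us delay are assumptions for the example;
 * <rte_alarm.h> is assumed included.
 */
static void
example_deferred_lsc_report(void *param)
{
	struct rte_eth_dev *dev = param;

	/* Runs outside the caller's lock context. */
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void
example_defer_lsc_report(struct rte_eth_dev *dev)
{
	rte_eal_alarm_set(10, example_deferred_lsc_report, dev);
}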
5893 hns3_restore_conf(struct hns3_adapter *hns)
5895 struct hns3_hw *hw = &hns->hw;
5898 ret = hns3_configure_all_mac_addr(hns, false);
5902 ret = hns3_configure_all_mc_mac_addr(hns, false);
5906 ret = hns3_dev_promisc_restore(hns);
5910 ret = hns3_restore_vlan_table(hns);
5914 ret = hns3_restore_vlan_conf(hns);
5918 ret = hns3_restore_all_fdir_filter(hns);
5922 ret = hns3_restore_ptp(hns);
5926 ret = hns3_restore_rx_interrupt(hw);
5930 ret = hns3_restore_gro_conf(hw);
5934 ret = hns3_restore_fec(hw);
5938 if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
5939 ret = hns3_do_start(hns, false);
5942 hns3_info(hw, "hns3 dev restart successful!");
5943 } else if (hw->adapter_state == HNS3_NIC_STOPPING)
5944 hw->adapter_state = HNS3_NIC_CONFIGURED;
5948 hns3_configure_all_mc_mac_addr(hns, true);
5950 hns3_configure_all_mac_addr(hns, true);
5955 hns3_reset_service(void *param)
5957 struct hns3_adapter *hns = (struct hns3_adapter *)param;
5958 struct hns3_hw *hw = &hns->hw;
5959 enum hns3_reset_level reset_level;
5960 struct timeval tv_delta;
5961 struct timeval tv_start;
5967  * The interrupt was not triggered within the delay time and may have
5968  * been lost. It is necessary to handle the interrupt to recover from
5969  * the error.
5971 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
5972 SCHEDULE_DEFERRED) {
5973 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
5975 hns3_err(hw, "Handling interrupts in delayed tasks");
5976 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
5977 reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
5978 if (reset_level == HNS3_NONE_RESET) {
5979 hns3_err(hw, "No reset level is set, try IMP reset");
5980 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
5983 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
5986  * Check if there is any ongoing reset in the hardware. This status can
5987  * be checked from reset_pending. If there is one, we need to wait for
5988  * the hardware to complete the reset.
5989  * a. If we can figure out in reasonable time that the hardware has
5990  *    fully reset, we can proceed with driver and client recovery.
5992  * b. Otherwise, we can come back later to check this status, so reschedule now.
5995 reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
5996 if (reset_level != HNS3_NONE_RESET) {
5997 hns3_clock_gettime(&tv_start);
5998 ret = hns3_reset_process(hns, reset_level);
5999 hns3_clock_gettime(&tv);
6000 timersub(&tv, &tv_start, &tv_delta);
6001 msec = hns3_clock_calctime_ms(&tv_delta);
6002 if (msec > HNS3_RESET_PROCESS_MS)
6003 hns3_err(hw, "%d handle long time delta %" PRIu64
6004 " ms time=%ld.%.6ld",
6005 hw->reset.level, msec,
6006 tv.tv_sec, tv.tv_usec);
6011 /* Check if we got any *new* reset requests to be honored */
6012 reset_level = hns3_get_reset_level(hns, &hw->reset.request);
6013 if (reset_level != HNS3_NONE_RESET)
6014 hns3_msix_process(hns, reset_level);
6018 hns3_get_speed_capa_num(uint16_t device_id)
6022 switch (device_id) {
6023 case HNS3_DEV_ID_25GE:
6024 case HNS3_DEV_ID_25GE_RDMA:
6027 case HNS3_DEV_ID_100G_RDMA_MACSEC:
6028 case HNS3_DEV_ID_200G_RDMA:
6040 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
6043 switch (device_id) {
6044 case HNS3_DEV_ID_25GE:
6046 case HNS3_DEV_ID_25GE_RDMA:
6047 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
6048 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
6050 /* In HNS3 devices, the 25G NIC is also compatible with the 10G rate */
6051 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
6052 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
6054 case HNS3_DEV_ID_100G_RDMA_MACSEC:
6055 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
6056 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
6058 case HNS3_DEV_ID_200G_RDMA:
6059 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
6060 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
6070 hns3_fec_get_capability(struct rte_eth_dev *dev,
6071 struct rte_eth_fec_capa *speed_fec_capa,
6074 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6075 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6076 uint16_t device_id = pci_dev->id.device_id;
6077 unsigned int capa_num;
6080 capa_num = hns3_get_speed_capa_num(device_id);
6081 if (capa_num == 0) {
6082 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
6087 if (speed_fec_capa == NULL || num < capa_num)
6090 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
6098 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
6100 struct hns3_config_fec_cmd *req;
6101 struct hns3_cmd_desc desc;
6105  * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported on devices
6106  * with a link speed below 10 Gbps.
6109 if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
6114 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
6115 req = (struct hns3_config_fec_cmd *)desc.data;
6116 ret = hns3_cmd_send(hw, &desc, 1);
6118 hns3_err(hw, "get current fec auto state failed, ret = %d",
6123 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
6128 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
6130 struct hns3_sfp_info_cmd *resp;
6131 uint32_t tmp_fec_capa;
6133 struct hns3_cmd_desc desc;
6137  * If the link is down and AUTO is enabled, AUTO is returned; otherwise,
6138  * the configured FEC mode is returned.
6139  * If the link is up, the current FEC mode is returned.
6141 if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
6142 ret = get_current_fec_auto_state(hw, &auto_state);
6146 if (auto_state == 0x1) {
6147 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
6152 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
6153 resp = (struct hns3_sfp_info_cmd *)desc.data;
6154 resp->query_type = HNS3_ACTIVE_QUERY;
6156 ret = hns3_cmd_send(hw, &desc, 1);
6157 if (ret == -EOPNOTSUPP) {
6158 hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
6161 hns3_err(hw, "get FEC failed, ret = %d", ret);
6166  * The FEC mode order defined in hns3 hardware is inconsistent with
6167  * that defined in the ethdev library, so the sequence needs to be converted.
6170 switch (resp->active_fec) {
6171 case HNS3_HW_FEC_MODE_NOFEC:
6172 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6174 case HNS3_HW_FEC_MODE_BASER:
6175 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
6177 case HNS3_HW_FEC_MODE_RS:
6178 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
6181 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6185 *fec_capa = tmp_fec_capa;
6190 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
6192 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6194 return hns3_fec_get_internal(hw, fec_capa);
6198 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
6200 struct hns3_config_fec_cmd *req;
6201 struct hns3_cmd_desc desc;
6204 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
6206 req = (struct hns3_config_fec_cmd *)desc.data;
6208 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
6209 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6210 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
6212 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
6213 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6214 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
6216 case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
6217 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6218 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
6220 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
6221 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
6226 ret = hns3_cmd_send(hw, &desc, 1);
6228 hns3_err(hw, "set fec mode failed, ret = %d", ret);
6234 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
6236 struct hns3_mac *mac = &hw->mac;
6239 switch (mac->link_speed) {
6240 case RTE_ETH_SPEED_NUM_10G:
6241 cur_capa = fec_capa[1].capa;
6243 case RTE_ETH_SPEED_NUM_25G:
6244 case RTE_ETH_SPEED_NUM_100G:
6245 case RTE_ETH_SPEED_NUM_200G:
6246 cur_capa = fec_capa[0].capa;
6257 is_fec_mode_one_bit_set(uint32_t mode)
6262 for (i = 0; i < sizeof(mode) * 8; i++)
6263 if (mode >> i & 0x1)
6266 return cnt == 1;
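/*
 * Note (added commentary): the loop above must scan all 32 bits of "mode",
 * hence the "* 8" on sizeof(mode). With GCC/Clang it could be written as a
 * single builtin, e.g. "return __builtin_popcount(mode) == 1;".
 */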
6270 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
6272 #define FEC_CAPA_NUM 2
6273 struct hns3_adapter *hns = dev->data->dev_private;
6274 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6275 struct hns3_pf *pf = &hns->pf;
6276 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
6278 uint32_t num = FEC_CAPA_NUM;
6281 ret = hns3_fec_get_capability(dev, fec_capa, num);
6285 /* The HNS3 PMD supports only modes with exactly one bit set, e.g. 0x1, 0x4 */
6286 if (!is_fec_mode_one_bit_set(mode)) {
6287 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
6288 "FEC mode should be only one bit set", mode);
6293 * Check whether the configured mode is within the FEC capability.
6294 * If not, the configured mode will not be supported.
6296 cur_capa = get_current_speed_fec_cap(hw, fec_capa);
6297 if (!(cur_capa & mode)) {
6298 hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
6302 rte_spinlock_lock(&hw->lock);
6303 ret = hns3_set_fec_hw(hw, mode);
6305 rte_spinlock_unlock(&hw->lock);
6309 pf->fec_mode = mode;
6310 rte_spinlock_unlock(&hw->lock);
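/*
 * Illustrative sketch (not taken from this driver): driving the FEC
 * callbacks above from an application. "port_id" is an assumption for the
 * example.
 */
static int
example_force_rs_fec(uint16_t port_id)
{
	uint32_t fec_capa;
	int ret;

	ret = rte_eth_fec_get(port_id, &fec_capa);
	if (ret != 0)
		return ret;
	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
		return 0;	/* RS-FEC already active */
	return rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
}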
6316 hns3_restore_fec(struct hns3_hw *hw)
6318 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6319 struct hns3_pf *pf = &hns->pf;
6320 uint32_t mode = pf->fec_mode;
6323 ret = hns3_set_fec_hw(hw, mode);
6325 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
6332 hns3_query_dev_fec_info(struct hns3_hw *hw)
6334 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6335 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
6338 ret = hns3_fec_get_internal(hw, &pf->fec_mode);
6340 hns3_err(hw, "query device FEC info failed, ret = %d", ret);
6346 hns3_optical_module_existed(struct hns3_hw *hw)
6348 struct hns3_cmd_desc desc;
6352 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
6353 ret = hns3_cmd_send(hw, &desc, 1);
6356 "fail to get optical module exist state, ret = %d.\n",
6360 existed = !!desc.data[0];
6366 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
6367 uint32_t len, uint8_t *data)
6369 #define HNS3_SFP_INFO_CMD_NUM 6
6370 #define HNS3_SFP_INFO_MAX_LEN \
6371 (HNS3_SFP_INFO_BD0_LEN + \
6372 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
6373 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
6374 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
6380 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
6381 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
6383 if (i < HNS3_SFP_INFO_CMD_NUM - 1)
6384 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
6387 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
6388 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
6389 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
6390 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
6392 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
6394 hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n",
6399 /* The data format in BD0 is different from the others. */
6400 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
6401 memcpy(data, sfp_info_bd0->data, copy_len);
6402 read_len = copy_len;
6404 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
6405 if (read_len >= len)
6408 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
6409 memcpy(data + read_len, desc[i].data, copy_len);
6410 read_len += copy_len;
6413 return (int)read_len;
6417 hns3_get_module_eeprom(struct rte_eth_dev *dev,
6418 struct rte_dev_eeprom_info *info)
6420 struct hns3_adapter *hns = dev->data->dev_private;
6421 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6422 uint32_t offset = info->offset;
6423 uint32_t len = info->length;
6424 uint8_t *data = info->data;
6425 uint32_t read_len = 0;
6427 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
6430 if (!hns3_optical_module_existed(hw)) {
6431 hns3_err(hw, "fail to read module EEPROM: no module is connected.\n");
6435 while (read_len < len) {
6437 ret = hns3_get_module_eeprom_data(hw, offset + read_len,
6449 hns3_get_module_info(struct rte_eth_dev *dev,
6450 struct rte_eth_dev_module_info *modinfo)
6452 #define HNS3_SFF8024_ID_SFP 0x03
6453 #define HNS3_SFF8024_ID_QSFP_8438 0x0c
6454 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d
6455 #define HNS3_SFF8024_ID_QSFP28_8636 0x11
6456 #define HNS3_SFF_8636_V1_3 0x03
6457 struct hns3_adapter *hns = dev->data->dev_private;
6458 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6459 struct rte_dev_eeprom_info info;
6460 struct hns3_sfp_type sfp_type;
6463 memset(&sfp_type, 0, sizeof(sfp_type));
6464 memset(&info, 0, sizeof(info));
6465 info.data = (uint8_t *)&sfp_type;
6466 info.length = sizeof(sfp_type);
6467 ret = hns3_get_module_eeprom(dev, &info);
6471 switch (sfp_type.type) {
6472 case HNS3_SFF8024_ID_SFP:
6473 modinfo->type = RTE_ETH_MODULE_SFF_8472;
6474 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
6476 case HNS3_SFF8024_ID_QSFP_8438:
6477 modinfo->type = RTE_ETH_MODULE_SFF_8436;
6478 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
6480 case HNS3_SFF8024_ID_QSFP_8436_8636:
6481 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
6482 modinfo->type = RTE_ETH_MODULE_SFF_8436;
6483 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
6485 modinfo->type = RTE_ETH_MODULE_SFF_8636;
6486 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
6489 case HNS3_SFF8024_ID_QSFP28_8636:
6490 modinfo->type = RTE_ETH_MODULE_SFF_8636;
6491 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
6494 hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n",
6495 sfp_type.type, sfp_type.ext_type);
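/*
 * Illustrative sketch (not taken from this driver): dumping a module's
 * EEPROM from an application, using the two callbacks above via the
 * generic ethdev API. "port_id" is an assumption; <stdlib.h>, <string.h>
 * and <errno.h> are assumed included.
 */
static int
example_dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&info, 0, sizeof(info));
	info.length = modinfo.eeprom_len;
	info.data = malloc(info.length);
	if (info.data == NULL)
		return -ENOMEM;
	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	/* On success, info.data now holds SFF-8472/8636 formatted bytes. */
	free(info.data);
	return ret;
}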
6502 static const struct eth_dev_ops hns3_eth_dev_ops = {
6503 .dev_configure = hns3_dev_configure,
6504 .dev_start = hns3_dev_start,
6505 .dev_stop = hns3_dev_stop,
6506 .dev_close = hns3_dev_close,
6507 .promiscuous_enable = hns3_dev_promiscuous_enable,
6508 .promiscuous_disable = hns3_dev_promiscuous_disable,
6509 .allmulticast_enable = hns3_dev_allmulticast_enable,
6510 .allmulticast_disable = hns3_dev_allmulticast_disable,
6511 .mtu_set = hns3_dev_mtu_set,
6512 .stats_get = hns3_stats_get,
6513 .stats_reset = hns3_stats_reset,
6514 .xstats_get = hns3_dev_xstats_get,
6515 .xstats_get_names = hns3_dev_xstats_get_names,
6516 .xstats_reset = hns3_dev_xstats_reset,
6517 .xstats_get_by_id = hns3_dev_xstats_get_by_id,
6518 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
6519 .dev_infos_get = hns3_dev_infos_get,
6520 .fw_version_get = hns3_fw_version_get,
6521 .rx_queue_setup = hns3_rx_queue_setup,
6522 .tx_queue_setup = hns3_tx_queue_setup,
6523 .rx_queue_release = hns3_dev_rx_queue_release,
6524 .tx_queue_release = hns3_dev_tx_queue_release,
6525 .rx_queue_start = hns3_dev_rx_queue_start,
6526 .rx_queue_stop = hns3_dev_rx_queue_stop,
6527 .tx_queue_start = hns3_dev_tx_queue_start,
6528 .tx_queue_stop = hns3_dev_tx_queue_stop,
6529 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
6530 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
6531 .rxq_info_get = hns3_rxq_info_get,
6532 .txq_info_get = hns3_txq_info_get,
6533 .rx_burst_mode_get = hns3_rx_burst_mode_get,
6534 .tx_burst_mode_get = hns3_tx_burst_mode_get,
6535 .flow_ctrl_get = hns3_flow_ctrl_get,
6536 .flow_ctrl_set = hns3_flow_ctrl_set,
6537 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
6538 .mac_addr_add = hns3_add_mac_addr,
6539 .mac_addr_remove = hns3_remove_mac_addr,
6540 .mac_addr_set = hns3_set_default_mac_addr,
6541 .set_mc_addr_list = hns3_set_mc_mac_addr_list,
6542 .link_update = hns3_dev_link_update,
6543 .dev_set_link_up = hns3_dev_set_link_up,
6544 .dev_set_link_down = hns3_dev_set_link_down,
6545 .rss_hash_update = hns3_dev_rss_hash_update,
6546 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
6547 .reta_update = hns3_dev_rss_reta_update,
6548 .reta_query = hns3_dev_rss_reta_query,
6549 .flow_ops_get = hns3_dev_flow_ops_get,
6550 .vlan_filter_set = hns3_vlan_filter_set,
6551 .vlan_tpid_set = hns3_vlan_tpid_set,
6552 .vlan_offload_set = hns3_vlan_offload_set,
6553 .vlan_pvid_set = hns3_vlan_pvid_set,
6554 .get_reg = hns3_get_regs,
6555 .get_module_info = hns3_get_module_info,
6556 .get_module_eeprom = hns3_get_module_eeprom,
6557 .get_dcb_info = hns3_get_dcb_info,
6558 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
6559 .fec_get_capability = hns3_fec_get_capability,
6560 .fec_get = hns3_fec_get,
6561 .fec_set = hns3_fec_set,
6562 .tm_ops_get = hns3_tm_ops_get,
6563 .tx_done_cleanup = hns3_tx_done_cleanup,
6564 .timesync_enable = hns3_timesync_enable,
6565 .timesync_disable = hns3_timesync_disable,
6566 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
6567 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
6568 .timesync_adjust_time = hns3_timesync_adjust_time,
6569 .timesync_read_time = hns3_timesync_read_time,
6570 .timesync_write_time = hns3_timesync_write_time,
6571 .eth_dev_priv_dump = hns3_eth_dev_priv_dump,
6574 static const struct hns3_reset_ops hns3_reset_ops = {
6575 .reset_service = hns3_reset_service,
6576 .stop_service = hns3_stop_service,
6577 .prepare_reset = hns3_prepare_reset,
6578 .wait_hardware_ready = hns3_wait_hardware_ready,
6579 .reinit_dev = hns3_reinit_dev,
6580 .restore_conf = hns3_restore_conf,
6581 .start_service = hns3_start_service,
6585 hns3_init_hw_ops(struct hns3_hw *hw)
6587 hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
6588 hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
6589 hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
6590 hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
6591 hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
6595 hns3_dev_init(struct rte_eth_dev *eth_dev)
6597 struct hns3_adapter *hns = eth_dev->data->dev_private;
6598 struct hns3_hw *hw = &hns->hw;
6601 PMD_INIT_FUNC_TRACE();
6603 hns3_flow_init(eth_dev);
6605 hns3_set_rxtx_function(eth_dev);
6606 eth_dev->dev_ops = &hns3_eth_dev_ops;
6607 eth_dev->rx_queue_count = hns3_rx_queue_count;
6608 ret = hns3_mp_init(eth_dev);
6612 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
6613 hns3_tx_push_init(eth_dev);
6617 hw->adapter_state = HNS3_NIC_UNINITIALIZED;
6619 hw->data = eth_dev->data;
6620 hns3_parse_devargs(eth_dev);
6623  * Set the default max packet size according to the default MTU
6624  * value in the DPDK framework.
6626 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
6628 ret = hns3_reset_init(hw);
6630 goto err_init_reset;
6631 hw->reset.ops = &hns3_reset_ops;
6633 hns3_init_hw_ops(hw);
6634 ret = hns3_init_pf(eth_dev);
6636 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
6640 ret = hns3_init_mac_addrs(eth_dev);
6642 goto err_init_mac_addrs;
6644 hw->adapter_state = HNS3_NIC_INITIALIZED;
6646 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
6648 hns3_err(hw, "Reschedule reset service after dev_init");
6649 hns3_schedule_reset(hns);
6651 /* IMP will wait for the ready flag before resetting */
6652 hns3_notify_reset_ready(hw, false);
6655 hns3_info(hw, "hns3 dev initialization successful!");
6659 hns3_uninit_pf(eth_dev);
6662 rte_free(hw->reset.wait_data);
6665 hns3_mp_uninit(eth_dev);
6668 eth_dev->dev_ops = NULL;
6669 eth_dev->rx_pkt_burst = NULL;
6670 eth_dev->rx_descriptor_status = NULL;
6671 eth_dev->tx_pkt_burst = NULL;
6672 eth_dev->tx_pkt_prepare = NULL;
6673 eth_dev->tx_descriptor_status = NULL;
6678 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
6680 struct hns3_adapter *hns = eth_dev->data->dev_private;
6681 struct hns3_hw *hw = &hns->hw;
6683 PMD_INIT_FUNC_TRACE();
6685 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
6686 hns3_mp_uninit(eth_dev);
6690 if (hw->adapter_state < HNS3_NIC_CLOSING)
6691 hns3_dev_close(eth_dev);
6693 hw->adapter_state = HNS3_NIC_REMOVED;
6698 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
6699 struct rte_pci_device *pci_dev)
6701 return rte_eth_dev_pci_generic_probe(pci_dev,
6702 sizeof(struct hns3_adapter),
6707 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
6709 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
6712 static const struct rte_pci_id pci_id_hns3_map[] = {
6713 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
6714 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
6715 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
6716 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
6717 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
6718 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
6719 { .vendor_id = 0, }, /* sentinel */
6722 static struct rte_pci_driver rte_hns3_pmd = {
6723 .id_table = pci_id_hns3_map,
6724 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
6725 .probe = eth_hns3_pci_probe,
6726 .remove = eth_hns3_pci_remove,
6729 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
6730 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
6731 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
6732 RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
6733 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
6734 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
6735 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
6736 HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
6737 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
6738 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);
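/*
 * Usage note (added commentary, assuming the devarg macros above expand to
 * "rx_func_hint", "tx_func_hint", "dev_caps_mask" and "mbx_time_limit_ms"):
 * the parameters are passed per device on the EAL command line, e.g.
 *     dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common ...
 * where the PCI address is a placeholder.
 */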