1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
6 #include <rte_bus_pci.h>
7 #include <ethdev_pci.h>
10 #include "hns3_ethdev.h"
11 #include "hns3_common.h"
12 #include "hns3_logs.h"
13 #include "hns3_rxtx.h"
14 #include "hns3_intr.h"
15 #include "hns3_regs.h"
18 #include "hns3_flow.h"
20 #define HNS3_SERVICE_INTERVAL 1000000 /* us */
21 #define HNS3_SERVICE_QUICK_INTERVAL 10
22 #define HNS3_INVALID_PVID 0xFFFF
24 #define HNS3_FILTER_TYPE_VF 0
25 #define HNS3_FILTER_TYPE_PORT 1
26 #define HNS3_FILTER_FE_EGRESS_V1_B BIT(0)
27 #define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0)
28 #define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1)
29 #define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2)
30 #define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3)
31 #define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \
32 | HNS3_FILTER_FE_ROCE_EGRESS_B)
33 #define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
34 | HNS3_FILTER_FE_ROCE_INGRESS_B)
36 /* Reset related Registers */
37 #define HNS3_GLOBAL_RESET_BIT 0
38 #define HNS3_CORE_RESET_BIT 1
39 #define HNS3_IMP_RESET_BIT 2
40 #define HNS3_FUN_RST_ING_B 0
42 #define HNS3_VECTOR0_IMP_RESET_INT_B 1
43 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U
44 #define HNS3_VECTOR0_IMP_RD_POISON_B 5U
45 #define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U
47 #define HNS3_RESET_WAIT_MS 100
48 #define HNS3_RESET_WAIT_CNT 200
50 /* FEC mode order defined in HNS3 hardware */
51 #define HNS3_HW_FEC_MODE_NOFEC 0
52 #define HNS3_HW_FEC_MODE_BASER 1
53 #define HNS3_HW_FEC_MODE_RS 2
55 enum hns3_evt_cause {
56 	HNS3_VECTOR0_EVENT_RST,
57 	HNS3_VECTOR0_EVENT_MBX,
58 	HNS3_VECTOR0_EVENT_ERR,
59 	HNS3_VECTOR0_EVENT_PTP,
60 	HNS3_VECTOR0_EVENT_OTHER,
61 };
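/*
 * Vector0 is shared by the reset, mailbox, error and PTP (1588) event
 * sources above; hns3_check_event_cause() decodes which one fired.
 */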
63 static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
64 { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
65 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
66 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
68 { RTE_ETH_SPEED_NUM_25G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
69 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
70 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
71 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
73 { RTE_ETH_SPEED_NUM_40G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
74 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
75 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) },
77 { RTE_ETH_SPEED_NUM_50G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
78 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
79 RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
80 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
82 { RTE_ETH_SPEED_NUM_100G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
83 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
84 RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
86 { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
87 RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
88 RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
91 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
93 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
94 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
96 static int hns3_update_link_info(struct rte_eth_dev *eth_dev);
97 static bool hns3_update_link_status(struct hns3_hw *hw);
99 static int hns3_add_mc_mac_addr(struct hns3_hw *hw,
100 struct rte_ether_addr *mac_addr);
101 static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
102 struct rte_ether_addr *mac_addr);
103 static int hns3_restore_fec(struct hns3_hw *hw);
104 static int hns3_query_dev_fec_info(struct hns3_hw *hw);
105 static int hns3_do_stop(struct hns3_adapter *hns);
106 static int hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds);
107 static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable);
111 hns3_pf_disable_irq0(struct hns3_hw *hw)
113 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
117 hns3_pf_enable_irq0(struct hns3_hw *hw)
119 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
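/*
 * The two helpers above disable/enable the misc (vector0) interrupt by
 * writing 0 or 1 to HNS3_MISC_VECTOR_REG_BASE.
 */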
122 static enum hns3_evt_cause
123 hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay,
126 struct hns3_hw *hw = &hns->hw;
128 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
129 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
130 *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
132 hw->reset.stats.imp_cnt++;
133 hns3_warn(hw, "IMP reset detected, clear reset status");
135 hns3_schedule_delayed_reset(hns);
136 hns3_warn(hw, "IMP reset detected, don't clear reset status");
139 return HNS3_VECTOR0_EVENT_RST;
142 static enum hns3_evt_cause
143 hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay,
146 struct hns3_hw *hw = &hns->hw;
148 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
149 hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
150 *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
152 hw->reset.stats.global_cnt++;
153 hns3_warn(hw, "Global reset detected, clear reset status");
155 hns3_schedule_delayed_reset(hns);
157 "Global reset detected, don't clear reset status");
160 return HNS3_VECTOR0_EVENT_RST;
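/*
 * Decode the vector0 interrupt by reading the interrupt status, RAS error
 * and CMDQ source registers. Reset events are handled first; mailbox
 * handling is deferred when both are reported (see the comment below).
 */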
163 static enum hns3_evt_cause
164 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
166 struct hns3_hw *hw = &hns->hw;
167 uint32_t vector0_int_stats;
168 uint32_t cmdq_src_val;
169 uint32_t hw_err_src_reg;
171 enum hns3_evt_cause ret;
174 /* fetch the events from their corresponding regs */
175 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
176 cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
177 hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
179 is_delay = clearval == NULL ? true : false;
181 	 * Assumption: if by any chance reset and mailbox events are reported
182 	 * together, only the reset event is processed and the handling of the
183 	 * mailbox events is deferred. Since the RX CMDQ event has not been
184 	 * cleared this time, the hardware will raise another interrupt just
185 	 * for the mailbox.
187 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
188 ret = hns3_proc_imp_reset_event(hns, is_delay, &val);
193 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
194 ret = hns3_proc_global_reset_event(hns, is_delay, &val);
198 /* Check for vector0 1588 event source */
199 if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) {
200 val = BIT(HNS3_VECTOR0_1588_INT_B);
201 ret = HNS3_VECTOR0_EVENT_PTP;
205 /* check for vector0 msix event source */
206 if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
207 hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
208 val = vector0_int_stats | hw_err_src_reg;
209 ret = HNS3_VECTOR0_EVENT_ERR;
213 /* check for vector0 mailbox(=CMDQ RX) event source */
214 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
215 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
217 ret = HNS3_VECTOR0_EVENT_MBX;
221 val = vector0_int_stats;
222 ret = HNS3_VECTOR0_EVENT_OTHER;
231 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
233 if (event_type == HNS3_VECTOR0_EVENT_RST ||
234 event_type == HNS3_VECTOR0_EVENT_PTP)
235 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
236 else if (event_type == HNS3_VECTOR0_EVENT_MBX)
237 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
241 hns3_clear_all_event_cause(struct hns3_hw *hw)
243 uint32_t vector0_int_stats;
245 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
246 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
247 hns3_warn(hw, "Probe during IMP reset interrupt");
249 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
250 hns3_warn(hw, "Probe during Global reset interrupt");
252 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
253 BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
254 BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
255 BIT(HNS3_VECTOR0_CORERESET_INT_B));
256 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
257 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_PTP,
258 BIT(HNS3_VECTOR0_1588_INT_B));
262 hns3_handle_mac_tnl(struct hns3_hw *hw)
264 struct hns3_cmd_desc desc;
268 /* query and clear mac tnl interrupt */
269 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_TNL_INT, true);
270 ret = hns3_cmd_send(hw, &desc, 1);
272 hns3_err(hw, "failed to query mac tnl int, ret = %d.", ret);
276 status = rte_le_to_cpu_32(desc.data[0]);
278 hns3_warn(hw, "mac tnl int occurs, status = 0x%x.", status);
279 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_MAC_TNL_INT,
281 desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_CLR);
282 ret = hns3_cmd_send(hw, &desc, 1);
284 hns3_err(hw, "failed to clear mac tnl int, ret = %d.",
290 hns3_interrupt_handler(void *param)
292 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
293 struct hns3_adapter *hns = dev->data->dev_private;
294 struct hns3_hw *hw = &hns->hw;
295 enum hns3_evt_cause event_cause;
296 uint32_t clearval = 0;
297 uint32_t vector0_int;
301 /* Disable interrupt */
302 hns3_pf_disable_irq0(hw);
304 event_cause = hns3_check_event_cause(hns, &clearval);
305 vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
306 ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
307 cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
308 hns3_clear_event_cause(hw, event_cause, clearval);
309 /* vector 0 interrupt is shared with reset and mailbox source events. */
310 if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
311 hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x "
312 "ras_int_stat:0x%x cmdq_int_stat:0x%x",
313 vector0_int, ras_int, cmdq_int);
314 hns3_handle_mac_tnl(hw);
315 hns3_handle_error(hns);
316 } else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
317 hns3_warn(hw, "received reset interrupt");
318 hns3_schedule_reset(hns);
319 } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) {
320 hns3_dev_handle_mbx_msg(hw);
322 hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x "
323 "ras_int_stat:0x%x cmdq_int_stat:0x%x",
324 vector0_int, ras_int, cmdq_int);
327 	/* Enable interrupt if it is not caused by reset */
328 hns3_pf_enable_irq0(hw);
332 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
334 #define HNS3_VLAN_ID_OFFSET_STEP 160
335 #define HNS3_VLAN_BYTE_SIZE 8
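	/*
	 * Example of the offset arithmetic below: vlan_id 305 gives
	 * vlan_offset_base = 305 / 160 = 1, vlan_offset_byte = (305 % 160) / 8 = 18
	 * and vlan_offset_byte_val = 1 << (305 % 8) = 0x02, i.e. bit 1 of
	 * byte 18 in the second 160-VLAN window.
	 */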
336 struct hns3_vlan_filter_pf_cfg_cmd *req;
337 struct hns3_hw *hw = &hns->hw;
338 uint8_t vlan_offset_byte_val;
339 struct hns3_cmd_desc desc;
340 uint8_t vlan_offset_byte;
341 uint8_t vlan_offset_base;
344 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
346 vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
347 vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
349 vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
351 req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
352 req->vlan_offset = vlan_offset_base;
353 req->vlan_cfg = on ? 0 : 1;
354 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
356 ret = hns3_cmd_send(hw, &desc, 1);
358 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
365 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
367 struct hns3_user_vlan_table *vlan_entry;
368 struct hns3_pf *pf = &hns->pf;
370 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
371 if (vlan_entry->vlan_id == vlan_id) {
372 if (vlan_entry->hd_tbl_status)
373 hns3_set_port_vlan_filter(hns, vlan_id, 0);
374 LIST_REMOVE(vlan_entry, next);
375 rte_free(vlan_entry);
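/*
 * Track a VLAN id in the PF's software vlan_list; hd_tbl_status records
 * whether the id has also been written to the hardware VLAN filter table.
 */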
382 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
385 struct hns3_user_vlan_table *vlan_entry;
386 struct hns3_hw *hw = &hns->hw;
387 struct hns3_pf *pf = &hns->pf;
389 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
390 if (vlan_entry->vlan_id == vlan_id)
394 vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
395 if (vlan_entry == NULL) {
396 hns3_err(hw, "Failed to malloc hns3 vlan table");
400 vlan_entry->hd_tbl_status = writen_to_tbl;
401 vlan_entry->vlan_id = vlan_id;
403 LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
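/*
 * Restore VLAN filtering after reset: when port-based VLAN is enabled only
 * the PVID is re-applied, otherwise every VLAN id marked as written to the
 * hardware table is programmed again.
 */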
407 hns3_restore_vlan_table(struct hns3_adapter *hns)
409 struct hns3_user_vlan_table *vlan_entry;
410 struct hns3_hw *hw = &hns->hw;
411 struct hns3_pf *pf = &hns->pf;
415 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
416 return hns3_vlan_pvid_configure(hns,
417 hw->port_base_vlan_cfg.pvid, 1);
419 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
420 if (vlan_entry->hd_tbl_status) {
421 vlan_id = vlan_entry->vlan_id;
422 ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);
432 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
434 struct hns3_hw *hw = &hns->hw;
435 bool writen_to_tbl = false;
439 * When vlan filter is enabled, hardware regards packets without vlan
440 * as packets with vlan 0. So, to receive packets without vlan, vlan id
441 * 0 is not allowed to be removed by rte_eth_dev_vlan_filter.
443 if (on == 0 && vlan_id == 0)
447 	 * When port-based VLAN is enabled, it is used as the VLAN filter
448 	 * condition. In this case the VLAN filter table is not updated when
449 	 * the user adds or removes a VLAN; only the VLAN list is updated.
450 	 * The VLAN ids in the list are not written to the VLAN filter table
451 	 * until port-based VLAN is disabled.
453 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
454 ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
455 writen_to_tbl = true;
460 hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
462 hns3_rm_dev_vlan_table(hns, vlan_id);
468 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
470 struct hns3_adapter *hns = dev->data->dev_private;
471 struct hns3_hw *hw = &hns->hw;
474 rte_spinlock_lock(&hw->lock);
475 ret = hns3_vlan_filter_configure(hns, vlan_id, on);
476 rte_spinlock_unlock(&hw->lock);
481 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
484 struct hns3_rx_vlan_type_cfg_cmd *rx_req;
485 struct hns3_tx_vlan_type_cfg_cmd *tx_req;
486 struct hns3_hw *hw = &hns->hw;
487 struct hns3_cmd_desc desc;
490 if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
491 vlan_type != RTE_ETH_VLAN_TYPE_OUTER)) {
492 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
496 if (tpid != RTE_ETHER_TYPE_VLAN) {
497 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
501 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
502 rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
504 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
505 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
506 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
507 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
508 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
509 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
510 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
511 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
514 ret = hns3_cmd_send(hw, &desc, 1);
516 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
521 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
523 tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
524 tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
525 tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
527 ret = hns3_cmd_send(hw, &desc, 1);
529 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
535 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
538 struct hns3_adapter *hns = dev->data->dev_private;
539 struct hns3_hw *hw = &hns->hw;
542 rte_spinlock_lock(&hw->lock);
543 ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
544 rte_spinlock_unlock(&hw->lock);
549 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
550 struct hns3_rx_vtag_cfg *vcfg)
552 struct hns3_vport_vtag_rx_cfg_cmd *req;
553 struct hns3_hw *hw = &hns->hw;
554 struct hns3_cmd_desc desc;
559 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
561 req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
562 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
563 vcfg->strip_tag1_en ? 1 : 0);
564 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
565 vcfg->strip_tag2_en ? 1 : 0);
566 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
567 vcfg->vlan1_vlan_prionly ? 1 : 0);
568 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
569 vcfg->vlan2_vlan_prionly ? 1 : 0);
571 /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
572 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
573 vcfg->strip_tag1_discard_en ? 1 : 0);
574 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
575 vcfg->strip_tag2_discard_en ? 1 : 0);
577 	 * In the current version VF is not supported when the PF is driven by
578 	 * the DPDK driver, so only the PF vport needs to be configured.
580 vport_id = HNS3_PF_FUNC_ID;
581 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
582 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
583 req->vf_bitmap[req->vf_offset] = bitmap;
585 ret = hns3_cmd_send(hw, &desc, 1);
587 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
592 hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
593 struct hns3_rx_vtag_cfg *vcfg)
595 struct hns3_pf *pf = &hns->pf;
596 memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
600 hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
601 struct hns3_tx_vtag_cfg *vcfg)
603 struct hns3_pf *pf = &hns->pf;
604 memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
608 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
610 struct hns3_rx_vtag_cfg rxvlan_cfg;
611 struct hns3_hw *hw = &hns->hw;
614 if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
615 rxvlan_cfg.strip_tag1_en = false;
616 rxvlan_cfg.strip_tag2_en = enable;
617 rxvlan_cfg.strip_tag2_discard_en = false;
619 rxvlan_cfg.strip_tag1_en = enable;
620 rxvlan_cfg.strip_tag2_en = true;
621 rxvlan_cfg.strip_tag2_discard_en = true;
624 rxvlan_cfg.strip_tag1_discard_en = false;
625 rxvlan_cfg.vlan1_vlan_prionly = false;
626 rxvlan_cfg.vlan2_vlan_prionly = false;
627 rxvlan_cfg.rx_vlan_offload_en = enable;
629 ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
631 hns3_err(hw, "%s strip rx vtag failed, ret = %d.",
632 enable ? "enable" : "disable", ret);
636 hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
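/*
 * Enable or disable a VLAN filter engine: vlan_type selects the VF or port
 * filter and fe_type selects the NIC/RoCE ingress/egress engines (see the
 * HNS3_FILTER_FE_* bits defined above).
 */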
642 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
643 uint8_t fe_type, bool filter_en, uint8_t vf_id)
645 struct hns3_vlan_filter_ctrl_cmd *req;
646 struct hns3_cmd_desc desc;
649 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
651 req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
652 req->vlan_type = vlan_type;
653 req->vlan_fe = filter_en ? fe_type : 0;
656 ret = hns3_cmd_send(hw, &desc, 1);
658 hns3_err(hw, "set vlan filter fail, ret =%d", ret);
664 hns3_vlan_filter_init(struct hns3_adapter *hns)
666 struct hns3_hw *hw = &hns->hw;
669 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
670 HNS3_FILTER_FE_EGRESS, false,
673 hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);
677 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
678 HNS3_FILTER_FE_INGRESS, false,
681 hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);
687 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
689 struct hns3_hw *hw = &hns->hw;
692 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
693 HNS3_FILTER_FE_INGRESS, enable,
696 hns3_err(hw, "failed to %s port vlan filter, ret = %d",
697 enable ? "enable" : "disable", ret);
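/*
 * Apply RTE_ETH_VLAN_FILTER_MASK and RTE_ETH_VLAN_STRIP_MASK changes under
 * hw->lock; VLAN filter changes are skipped while the port is promiscuous.
 */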
703 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
705 struct hns3_adapter *hns = dev->data->dev_private;
706 struct hns3_hw *hw = &hns->hw;
707 struct rte_eth_rxmode *rxmode;
708 unsigned int tmp_mask;
712 rte_spinlock_lock(&hw->lock);
713 rxmode = &dev->data->dev_conf.rxmode;
714 tmp_mask = (unsigned int)mask;
715 if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
716 /* ignore vlan filter configuration during promiscuous mode */
717 if (!dev->data->promiscuous) {
718 /* Enable or disable VLAN filter */
719 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ?
722 ret = hns3_enable_vlan_filter(hns, enable);
724 rte_spinlock_unlock(&hw->lock);
725 hns3_err(hw, "failed to %s rx filter, ret = %d",
726 enable ? "enable" : "disable", ret);
732 if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
733 /* Enable or disable VLAN stripping */
734 enable = rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ?
737 ret = hns3_en_hw_strip_rxvtag(hns, enable);
739 rte_spinlock_unlock(&hw->lock);
740 hns3_err(hw, "failed to %s rx strip, ret = %d",
741 enable ? "enable" : "disable", ret);
746 rte_spinlock_unlock(&hw->lock);
752 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
753 struct hns3_tx_vtag_cfg *vcfg)
755 struct hns3_vport_vtag_tx_cfg_cmd *req;
756 struct hns3_cmd_desc desc;
757 struct hns3_hw *hw = &hns->hw;
762 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
764 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
765 req->def_vlan_tag1 = vcfg->default_tag1;
766 req->def_vlan_tag2 = vcfg->default_tag2;
767 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
768 vcfg->accept_tag1 ? 1 : 0);
769 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
770 vcfg->accept_untag1 ? 1 : 0);
771 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
772 vcfg->accept_tag2 ? 1 : 0);
773 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
774 vcfg->accept_untag2 ? 1 : 0);
775 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
776 vcfg->insert_tag1_en ? 1 : 0);
777 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
778 vcfg->insert_tag2_en ? 1 : 0);
779 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
781 /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
782 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
783 vcfg->tag_shift_mode_en ? 1 : 0);
786 	 * In the current version VF is not supported when the PF is driven by
787 	 * the DPDK driver, so only the PF vport needs to be configured.
789 vport_id = HNS3_PF_FUNC_ID;
790 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
791 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
792 req->vf_bitmap[req->vf_offset] = bitmap;
794 ret = hns3_cmd_send(hw, &desc, 1);
796 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
802 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
805 struct hns3_hw *hw = &hns->hw;
806 struct hns3_tx_vtag_cfg txvlan_cfg;
809 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
810 txvlan_cfg.accept_tag1 = true;
811 txvlan_cfg.insert_tag1_en = false;
812 txvlan_cfg.default_tag1 = 0;
814 txvlan_cfg.accept_tag1 =
815 hw->vlan_mode == HNS3_HW_SHIFT_AND_DISCARD_MODE;
816 txvlan_cfg.insert_tag1_en = true;
817 txvlan_cfg.default_tag1 = pvid;
820 txvlan_cfg.accept_untag1 = true;
821 txvlan_cfg.accept_tag2 = true;
822 txvlan_cfg.accept_untag2 = true;
823 txvlan_cfg.insert_tag2_en = false;
824 txvlan_cfg.default_tag2 = 0;
825 txvlan_cfg.tag_shift_mode_en = true;
827 ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
829 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
834 hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
840 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
842 struct hns3_user_vlan_table *vlan_entry;
843 struct hns3_pf *pf = &hns->pf;
845 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
846 if (vlan_entry->hd_tbl_status) {
847 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
848 vlan_entry->hd_tbl_status = false;
853 vlan_entry = LIST_FIRST(&pf->vlan_list);
855 LIST_REMOVE(vlan_entry, next);
856 rte_free(vlan_entry);
857 vlan_entry = LIST_FIRST(&pf->vlan_list);
863 hns3_add_all_vlan_table(struct hns3_adapter *hns)
865 struct hns3_user_vlan_table *vlan_entry;
866 struct hns3_pf *pf = &hns->pf;
868 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
869 if (!vlan_entry->hd_tbl_status) {
870 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
871 vlan_entry->hd_tbl_status = true;
877 hns3_remove_all_vlan_table(struct hns3_adapter *hns)
879 struct hns3_hw *hw = &hns->hw;
882 hns3_rm_all_vlan_table(hns, true);
883 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID) {
884 ret = hns3_set_port_vlan_filter(hns,
885 hw->port_base_vlan_cfg.pvid, 0);
887 hns3_err(hw, "Failed to remove all vlan table, ret =%d",
895 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
896 uint16_t port_base_vlan_state, uint16_t new_pvid)
898 struct hns3_hw *hw = &hns->hw;
902 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
903 old_pvid = hw->port_base_vlan_cfg.pvid;
904 if (old_pvid != HNS3_INVALID_PVID) {
905 ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
907 hns3_err(hw, "failed to remove old pvid %u, "
908 "ret = %d", old_pvid, ret);
913 hns3_rm_all_vlan_table(hns, false);
914 ret = hns3_set_port_vlan_filter(hns, new_pvid, 1);
916 hns3_err(hw, "failed to add new pvid %u, ret = %d",
921 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
923 hns3_err(hw, "failed to remove pvid %u, ret = %d",
928 hns3_add_all_vlan_table(hns);
934 hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
936 struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
937 struct hns3_rx_vtag_cfg rx_vlan_cfg;
941 rx_strip_en = old_cfg->rx_vlan_offload_en;
943 rx_vlan_cfg.strip_tag1_en = rx_strip_en;
944 rx_vlan_cfg.strip_tag2_en = true;
945 rx_vlan_cfg.strip_tag2_discard_en = true;
947 rx_vlan_cfg.strip_tag1_en = false;
948 rx_vlan_cfg.strip_tag2_en = rx_strip_en;
949 rx_vlan_cfg.strip_tag2_discard_en = false;
951 rx_vlan_cfg.strip_tag1_discard_en = false;
952 rx_vlan_cfg.vlan1_vlan_prionly = false;
953 rx_vlan_cfg.vlan2_vlan_prionly = false;
954 rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;
956 ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
960 hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
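/*
 * Configure port-based VLAN (PVID): program Tx tag insertion and Rx
 * stripping, then update the VLAN filter entries; on failure the previous
 * Rx/Tx VLAN settings are rolled back.
 */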
965 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
967 struct hns3_hw *hw = &hns->hw;
968 uint16_t port_base_vlan_state;
971 if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
972 if (hw->port_base_vlan_cfg.pvid != HNS3_INVALID_PVID)
973 hns3_warn(hw, "Invalid operation! As current pvid set "
974 "is %u, disable pvid %u is invalid",
975 hw->port_base_vlan_cfg.pvid, pvid);
979 port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
980 HNS3_PORT_BASE_VLAN_DISABLE;
981 ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
983 hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
988 ret = hns3_en_pvid_strip(hns, on);
990 hns3_err(hw, "failed to config rx vlan strip for pvid, "
992 goto pvid_vlan_strip_fail;
995 if (pvid == HNS3_INVALID_PVID)
997 ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid);
999 hns3_err(hw, "failed to update vlan filter entries, ret = %d",
1001 goto vlan_filter_set_fail;
1005 hw->port_base_vlan_cfg.state = port_base_vlan_state;
1006 hw->port_base_vlan_cfg.pvid = on ? pvid : HNS3_INVALID_PVID;
1009 vlan_filter_set_fail:
1010 err = hns3_en_pvid_strip(hns, hw->port_base_vlan_cfg.state ==
1011 HNS3_PORT_BASE_VLAN_ENABLE);
1013 hns3_err(hw, "fail to rollback pvid strip, ret = %d", err);
1015 pvid_vlan_strip_fail:
1016 err = hns3_vlan_txvlan_cfg(hns, hw->port_base_vlan_cfg.state,
1017 hw->port_base_vlan_cfg.pvid);
1019 hns3_err(hw, "fail to rollback txvlan status, ret = %d", err);
1025 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1027 struct hns3_adapter *hns = dev->data->dev_private;
1028 struct hns3_hw *hw = &hns->hw;
1029 bool pvid_en_state_change;
1030 uint16_t pvid_state;
1033 if (pvid > RTE_ETHER_MAX_VLAN_ID) {
1034 hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
1035 RTE_ETHER_MAX_VLAN_ID);
1040 	 * If the PVID configuration state changes, the PVID state cached in
1041 	 * struct hns3_tx_queue/hns3_rx_queue must be refreshed as well.
1043 pvid_state = hw->port_base_vlan_cfg.state;
1044 if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
1045 (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
1046 pvid_en_state_change = false;
1048 pvid_en_state_change = true;
1050 rte_spinlock_lock(&hw->lock);
1051 ret = hns3_vlan_pvid_configure(hns, pvid, on);
1052 rte_spinlock_unlock(&hw->lock);
1056 	 * Only in HNS3_SW_SHIFT_AND_DISCARD_MODE do the PVID-related
1057 	 * operations in Tx/Rx need to be handled by the PMD.
1059 if (pvid_en_state_change &&
1060 hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
1061 hns3_update_all_queues_pvid_proc_en(hw);
1067 hns3_default_vlan_config(struct hns3_adapter *hns)
1069 struct hns3_hw *hw = &hns->hw;
1073 * When vlan filter is enabled, hardware regards packets without vlan
1074 * as packets with vlan 0. Therefore, if vlan 0 is not in the vlan
1075 	 * table, packets without vlan won't be received. So, vlan 0 is added here as the default.
1078 ret = hns3_vlan_filter_configure(hns, 0, 1);
1080 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
1085 hns3_init_vlan_config(struct hns3_adapter *hns)
1087 struct hns3_hw *hw = &hns->hw;
1091 	 * This function can be called both in the initialization and the reset
1092 	 * process. When called in the reset process, it means that the hardware
1093 	 * has been reset successfully, and the hardware configuration needs to
1094 	 * be restored so that it remains unchanged before and after the reset.
1097 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1098 hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
1099 hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
1102 ret = hns3_vlan_filter_init(hns);
1104 hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
1108 ret = hns3_vlan_tpid_configure(hns, RTE_ETH_VLAN_TYPE_INNER,
1109 RTE_ETHER_TYPE_VLAN);
1111 hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
1116 	 * In the reinit dev stage of the reset process, the following
1117 	 * vlan-related configurations may differ from those at initialization;
1118 	 * they will be restored to hardware later in hns3_restore_vlan_table
1119 	 * and hns3_restore_vlan_conf.
1121 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1122 ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
1124 hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
1128 ret = hns3_en_hw_strip_rxvtag(hns, false);
1130 hns3_err(hw, "rx strip configure fail in pf, ret =%d",
1136 return hns3_default_vlan_config(hns);
1140 hns3_restore_vlan_conf(struct hns3_adapter *hns)
1142 struct hns3_pf *pf = &hns->pf;
1143 struct hns3_hw *hw = &hns->hw;
1148 if (!hw->data->promiscuous) {
1149 /* restore vlan filter states */
1150 offloads = hw->data->dev_conf.rxmode.offloads;
1151 enable = offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER ? true : false;
1152 ret = hns3_enable_vlan_filter(hns, enable);
1154 hns3_err(hw, "failed to restore vlan rx filter conf, "
1160 ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
1162 hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);
1166 ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
1168 hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);
1174 hns3_dev_configure_vlan(struct rte_eth_dev *dev)
1176 struct hns3_adapter *hns = dev->data->dev_private;
1177 struct rte_eth_dev_data *data = dev->data;
1178 struct rte_eth_txmode *txmode;
1179 struct hns3_hw *hw = &hns->hw;
1183 txmode = &data->dev_conf.txmode;
1184 if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
1186 "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
1187 "configuration is not supported! Ignore these two "
1188 "parameters: hw_vlan_reject_tagged(%u), "
1189 "hw_vlan_reject_untagged(%u)",
1190 txmode->hw_vlan_reject_tagged,
1191 txmode->hw_vlan_reject_untagged);
1193 /* Apply vlan offload setting */
1194 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK;
1195 ret = hns3_vlan_offload_set(dev, mask);
1197 hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
1203 	 * If no pvid config is set in rte_eth_conf, the driver does not need
1204 	 * to write any VLAN pvid related configuration to hardware.
1206 if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
1209 /* Apply pvid setting */
1210 ret = hns3_vlan_pvid_set(dev, txmode->pvid,
1211 txmode->hw_vlan_insert_pvid);
1213 hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
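/*
 * Program the minimum and maximum TSO MSS into hardware with a single
 * HNS3_OPC_TSO_GENERIC_CONFIG descriptor.
 */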
1220 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
1221 unsigned int tso_mss_max)
1223 struct hns3_cfg_tso_status_cmd *req;
1224 struct hns3_cmd_desc desc;
1227 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
1229 req = (struct hns3_cfg_tso_status_cmd *)desc.data;
1232 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
1234 req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
1237 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
1239 req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
1241 return hns3_cmd_send(hw, &desc, 1);
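/*
 * Allocate or free unicast MAC VLAN (UMV) table space for this function;
 * when allocating, the firmware reports the space actually granted back in
 * the command descriptor.
 */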
1245 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
1246 uint16_t *allocated_size, bool is_alloc)
1248 struct hns3_umv_spc_alc_cmd *req;
1249 struct hns3_cmd_desc desc;
1252 req = (struct hns3_umv_spc_alc_cmd *)desc.data;
1253 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
1254 hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
1255 req->space_size = rte_cpu_to_le_32(space_size);
1257 ret = hns3_cmd_send(hw, &desc, 1);
1259 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
1260 is_alloc ? "allocate" : "free", ret);
1264 if (is_alloc && allocated_size)
1265 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
1271 hns3_init_umv_space(struct hns3_hw *hw)
1273 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1274 struct hns3_pf *pf = &hns->pf;
1275 uint16_t allocated_size = 0;
1278 ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
1283 if (allocated_size < pf->wanted_umv_size)
1284 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
1285 pf->wanted_umv_size, allocated_size);
1287 pf->max_umv_size = (!!allocated_size) ? allocated_size :
1288 pf->wanted_umv_size;
1289 pf->used_umv_size = 0;
1294 hns3_uninit_umv_space(struct hns3_hw *hw)
1296 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1297 struct hns3_pf *pf = &hns->pf;
1300 if (pf->max_umv_size == 0)
1303 ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
1307 pf->max_umv_size = 0;
1313 hns3_is_umv_space_full(struct hns3_hw *hw)
1315 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1316 struct hns3_pf *pf = &hns->pf;
1319 is_full = (pf->used_umv_size >= pf->max_umv_size);
1325 hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
1327 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1328 struct hns3_pf *pf = &hns->pf;
1331 if (pf->used_umv_size > 0)
1332 pf->used_umv_size--;
1334 pf->used_umv_size++;
1338 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
1339 const uint8_t *addr, bool is_mc)
1341 const unsigned char *mac_addr = addr;
1342 uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
1343 ((uint32_t)mac_addr[2] << 16) |
1344 ((uint32_t)mac_addr[1] << 8) |
1345 (uint32_t)mac_addr[0];
1346 uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
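	/*
	 * For example, MAC 00:11:22:33:44:55 is packed above as
	 * high_val = 0x33221100 and low_val = 0x5544 before the
	 * little-endian conversion below.
	 */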
1348 hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1350 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1351 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
1352 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1355 new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
1356 new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
1360 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
1362 enum hns3_mac_vlan_tbl_opcode op)
1365 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
1370 if (op == HNS3_MAC_VLAN_ADD) {
1371 if (resp_code == 0 || resp_code == 1) {
1373 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
1374 hns3_err(hw, "add mac addr failed for uc_overflow");
1376 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
1377 hns3_err(hw, "add mac addr failed for mc_overflow");
1381 hns3_err(hw, "add mac addr failed for undefined, code=%u",
1384 } else if (op == HNS3_MAC_VLAN_REMOVE) {
1385 if (resp_code == 0) {
1387 } else if (resp_code == 1) {
1388 hns3_dbg(hw, "remove mac addr failed for miss");
1392 hns3_err(hw, "remove mac addr failed for undefined, code=%u",
1395 } else if (op == HNS3_MAC_VLAN_LKUP) {
1396 if (resp_code == 0) {
1398 } else if (resp_code == 1) {
1399 hns3_dbg(hw, "lookup mac addr failed for miss");
1403 hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
1408 hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
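/*
 * Look up an entry in the MAC VLAN table: multicast lookups use a chained
 * multi-descriptor command (HNS3_CMD_FLAG_NEXT) while unicast lookups fit
 * in a single descriptor; the result is decoded by
 * hns3_get_mac_vlan_cmd_status().
 */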
1415 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
1416 struct hns3_mac_vlan_tbl_entry_cmd *req,
1417 struct hns3_cmd_desc *desc, uint8_t desc_num)
1424 if (desc_num == HNS3_MC_MAC_VLAN_OPS_DESC_NUM) {
1425 for (i = 0; i < desc_num - 1; i++) {
1426 hns3_cmd_setup_basic_desc(&desc[i],
1427 HNS3_OPC_MAC_VLAN_ADD, true);
1428 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1430 memcpy(desc[i].data, req,
1431 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1433 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_MAC_VLAN_ADD,
1436 hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD,
1438 memcpy(desc[0].data, req,
1439 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1441 ret = hns3_cmd_send(hw, desc, desc_num);
1443 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
1447 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1448 retval = rte_le_to_cpu_16(desc[0].retval);
1450 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1451 HNS3_MAC_VLAN_LKUP);
1455 hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
1456 struct hns3_mac_vlan_tbl_entry_cmd *req,
1457 struct hns3_cmd_desc *desc, uint8_t desc_num)
1465 if (desc_num == HNS3_UC_MAC_VLAN_OPS_DESC_NUM) {
1466 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_MAC_VLAN_ADD, false);
1467 memcpy(desc->data, req,
1468 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1469 ret = hns3_cmd_send(hw, desc, desc_num);
1470 resp_code = (rte_le_to_cpu_32(desc->data[0]) >> 8) & 0xff;
1471 retval = rte_le_to_cpu_16(desc->retval);
1473 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1476 for (i = 0; i < desc_num; i++) {
1477 hns3_cmd_reuse_desc(&desc[i], false);
1478 if (i == desc_num - 1)
1480 rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
1483 rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1485 memcpy(desc[0].data, req,
1486 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1488 ret = hns3_cmd_send(hw, desc, desc_num);
1489 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1490 retval = rte_le_to_cpu_16(desc[0].retval);
1492 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1497 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
1505 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1506 struct hns3_mac_vlan_tbl_entry_cmd *req)
1508 struct hns3_cmd_desc desc;
1513 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1515 memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1517 ret = hns3_cmd_send(hw, &desc, 1);
1519 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1522 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1523 retval = rte_le_to_cpu_16(desc.retval);
1525 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1526 HNS3_MAC_VLAN_REMOVE);
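/*
 * Add a unicast MAC address: the address is looked up first and only
 * written to the table when it is absent and UMV space is still available.
 */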
1530 hns3_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1532 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1533 struct hns3_mac_vlan_tbl_entry_cmd req;
1534 struct hns3_pf *pf = &hns->pf;
1535 struct hns3_cmd_desc desc;
1536 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1537 uint16_t egress_port = 0;
1541 /* check if mac addr is valid */
1542 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1543 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1545 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
1550 memset(&req, 0, sizeof(req));
1553 	 * In the current version VF is not supported when the PF is driven by
1554 	 * the DPDK driver, so only the PF vport needs to be configured.
1556 vf_id = HNS3_PF_FUNC_ID;
1557 hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
1558 HNS3_MAC_EPORT_VFID_S, vf_id);
1560 req.egress_port = rte_cpu_to_le_16(egress_port);
1562 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1565 	 * Look up the mac address in the mac_vlan table, and add it if the
1566 	 * entry does not exist yet. Duplicate unicast entries are not allowed
1567 	 * in the mac vlan table.
1569 ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc,
1570 HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
1571 if (ret == -ENOENT) {
1572 if (!hns3_is_umv_space_full(hw)) {
1573 ret = hns3_add_mac_vlan_tbl(hw, &req, &desc,
1574 HNS3_UC_MAC_VLAN_OPS_DESC_NUM);
1576 hns3_update_umv_space(hw, false);
1580 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
1585 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
1587 /* check if we just hit the duplicate */
1589 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
1593 hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
1600 hns3_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1602 struct hns3_mac_vlan_tbl_entry_cmd req;
1603 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1606 /* check if mac addr is valid */
1607 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1608 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1610 hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
1615 memset(&req, 0, sizeof(req));
1616 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1617 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1618 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1619 if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
1622 hns3_update_umv_space(hw, true);
1628 hns3_set_default_mac_addr(struct rte_eth_dev *dev,
1629 struct rte_ether_addr *mac_addr)
1631 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1632 struct rte_ether_addr *oaddr;
1633 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1636 rte_spinlock_lock(&hw->lock);
1637 oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
1638 ret = hw->ops.del_uc_mac_addr(hw, oaddr);
1640 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1642 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
1645 rte_spinlock_unlock(&hw->lock);
1649 ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
1651 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1653 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
1654 goto err_add_uc_addr;
1657 ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
1659 hns3_err(hw, "Failed to configure mac pause address: %d", ret);
1660 goto err_pause_addr_cfg;
1663 rte_ether_addr_copy(mac_addr,
1664 (struct rte_ether_addr *)hw->mac.mac_addr);
1665 rte_spinlock_unlock(&hw->lock);
1670 ret_val = hw->ops.del_uc_mac_addr(hw, mac_addr);
1672 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1675 			  "Failed to roll back the newly set mac addr(%s): %d",
1680 ret_val = hw->ops.add_uc_mac_addr(hw, oaddr);
1682 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, oaddr);
1683 hns3_warn(hw, "Failed to restore old uc mac addr(%s): %d",
1686 rte_spinlock_unlock(&hw->lock);
1692 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1694 #define HNS3_VF_NUM_IN_FIRST_DESC 192
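	/*
	 * For example, vfid 5 maps to desc[1] word 0, bit 5, while vfid 200
	 * maps to desc[2] word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
	 */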
1698 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1699 word_num = vfid / 32;
1700 bit_num = vfid % 32;
1702 desc[1].data[word_num] &=
1703 rte_cpu_to_le_32(~(1UL << bit_num));
1705 desc[1].data[word_num] |=
1706 rte_cpu_to_le_32(1UL << bit_num);
1708 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
1709 bit_num = vfid % 32;
1711 desc[2].data[word_num] &=
1712 rte_cpu_to_le_32(~(1UL << bit_num));
1714 desc[2].data[word_num] |=
1715 rte_cpu_to_le_32(1UL << bit_num);
1720 hns3_add_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1722 struct hns3_cmd_desc desc[HNS3_MC_MAC_VLAN_OPS_DESC_NUM];
1723 struct hns3_mac_vlan_tbl_entry_cmd req;
1724 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1728 /* Check if mac addr is valid */
1729 if (!rte_is_multicast_ether_addr(mac_addr)) {
1730 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1732 hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
1737 memset(&req, 0, sizeof(req));
1738 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1739 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1740 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1741 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1743 	/* This mac addr does not exist, add a new entry for it */
1744 memset(desc[0].data, 0, sizeof(desc[0].data));
1745 memset(desc[1].data, 0, sizeof(desc[0].data));
1746 memset(desc[2].data, 0, sizeof(desc[0].data));
1750 	 * In the current version VF is not supported when the PF is driven by
1751 	 * the DPDK driver, so only the PF vport needs to be configured.
1753 vf_id = HNS3_PF_FUNC_ID;
1754 hns3_update_desc_vfid(desc, vf_id, false);
1755 ret = hns3_add_mac_vlan_tbl(hw, &req, desc,
1756 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1759 hns3_err(hw, "mc mac vlan table is full");
1760 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1762 hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
1769 hns3_remove_mc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1771 struct hns3_mac_vlan_tbl_entry_cmd req;
1772 struct hns3_cmd_desc desc[3];
1773 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1777 /* Check if mac addr is valid */
1778 if (!rte_is_multicast_ether_addr(mac_addr)) {
1779 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1781 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
1786 memset(&req, 0, sizeof(req));
1787 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1788 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1789 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc,
1790 HNS3_MC_MAC_VLAN_OPS_DESC_NUM);
1793 	 * This mac addr exists; remove this handle's VFID from it.
1794 	 * In the current version VF is not supported when the PF is driven by
1795 	 * the DPDK driver, so only the PF vport needs to be configured.
1797 vf_id = HNS3_PF_FUNC_ID;
1798 hns3_update_desc_vfid(desc, vf_id, true);
1800 		/* All the vfid bits are now zero, so delete this entry */
1801 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1802 } else if (ret == -ENOENT) {
1803 /* This mac addr doesn't exist. */
1808 hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1810 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
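/*
 * Validate the requested multi-queue mode: VMDq is not supported, and in
 * DCB mode the Rx/Tx TC counts must match (4 or 8 TCs) with identical
 * priority-to-TC mappings in both directions.
 */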
1817 hns3_check_mq_mode(struct rte_eth_dev *dev)
1819 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1820 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1821 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1822 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1823 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1824 struct rte_eth_dcb_tx_conf *dcb_tx_conf;
1829 if (((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) ||
1830 (tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
1831 tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)) {
1832 hns3_err(hw, "VMDQ is not supported, rx_mq_mode = %d, tx_mq_mode = %d.",
1833 rx_mq_mode, tx_mq_mode);
1837 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1838 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
1839 if ((uint32_t)rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
1840 if (dcb_rx_conf->nb_tcs > pf->tc_max) {
1841 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
1842 dcb_rx_conf->nb_tcs, pf->tc_max);
1846 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
1847 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
1848 hns3_err(hw, "on RTE_ETH_MQ_RX_DCB_RSS mode, "
1849 "nb_tcs(%d) != %d or %d in rx direction.",
1850 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
1854 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
1855 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
1856 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
1860 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1861 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
1862 hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
1863 "is not equal to one in tx direction.",
1864 i, dcb_rx_conf->dcb_tc[i]);
1867 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1868 max_tc = dcb_rx_conf->dcb_tc[i];
1871 num_tc = max_tc + 1;
1872 if (num_tc > dcb_rx_conf->nb_tcs) {
1873 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
1874 num_tc, dcb_rx_conf->nb_tcs);
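/*
 * Map or unmap a Rx/Tx ring to an interrupt vector by sending an add/del
 * ring-to-vector command that carries the queue type, queue id and GL index.
 */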
1883 hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
1884 enum hns3_ring_type queue_type, uint16_t queue_id)
1886 struct hns3_cmd_desc desc;
1887 struct hns3_ctrl_vector_chain_cmd *req =
1888 (struct hns3_ctrl_vector_chain_cmd *)desc.data;
1889 enum hns3_opcode_type op;
1890 uint16_t tqp_type_and_id = 0;
1895 op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
1896 hns3_cmd_setup_basic_desc(&desc, op, false);
1897 req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
1898 HNS3_TQP_INT_ID_L_S);
1899 req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
1900 HNS3_TQP_INT_ID_H_S);
1902 if (queue_type == HNS3_RING_TYPE_RX)
1903 gl = HNS3_RING_GL_RX;
1905 gl = HNS3_RING_GL_TX;
1909 hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
1911 hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
1912 hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
1914 req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
1915 req->int_cause_num = 1;
1916 ret = hns3_cmd_send(hw, &desc, 1);
1918 hns3_err(hw, "%s TQP %u fail, vector_id = %u, ret = %d.",
1919 en ? "Map" : "Unmap", queue_id, vector_id, ret);
1927 hns3_setup_dcb(struct rte_eth_dev *dev)
1929 struct hns3_adapter *hns = dev->data->dev_private;
1930 struct hns3_hw *hw = &hns->hw;
1933 if (!hns3_dev_get_support(hw, DCB)) {
1934 hns3_err(hw, "this port does not support dcb configurations.");
1938 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
1939 hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
1943 ret = hns3_dcb_configure(hns);
1945 hns3_err(hw, "failed to config dcb: %d", ret);
1951 hns3_check_link_speed(struct hns3_hw *hw, uint32_t link_speeds)
1956 	 * Some hardware doesn't support auto-negotiation, but the user may
1957 	 * leave link_speeds at its default value of 0, which requests
1958 	 * auto-negotiation. In this case, success should be returned.
1960 if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
1961 hw->mac.support_autoneg == 0)
1964 if (link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
1965 ret = hns3_check_port_speed(hw, link_speeds);
1974 hns3_check_dev_conf(struct rte_eth_dev *dev)
1976 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1977 struct rte_eth_conf *conf = &dev->data->dev_conf;
1980 ret = hns3_check_mq_mode(dev);
1984 return hns3_check_link_speed(hw, conf->link_speeds);
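/*
 * Set up fake queues when the Rx and Tx queue counts differ, then apply
 * DCB or RSS, MTU, Rx timestamp, VLAN and GRO configuration; on any failure
 * the fake-queue setup is rolled back and the state returns to
 * HNS3_NIC_INITIALIZED.
 */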
1988 hns3_dev_configure(struct rte_eth_dev *dev)
1990 struct hns3_adapter *hns = dev->data->dev_private;
1991 struct rte_eth_conf *conf = &dev->data->dev_conf;
1992 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
1993 struct hns3_hw *hw = &hns->hw;
1994 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1995 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1996 struct rte_eth_rss_conf rss_conf;
2000 hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
2003 	 * Some versions of the hardware network engine do not support
2004 	 * individually enabling/disabling/resetting the Tx or Rx queue. These
2005 	 * devices must enable/disable/reset Tx and Rx queues at the same time.
2006 	 * When the number of Tx queues allocated by the upper application is
2007 	 * not equal to the number of Rx queues, the driver needs to set up
2008 	 * fake Tx or Rx queues to adjust the number of Tx/Rx queues; otherwise,
2009 	 * the network engine cannot work as usual. These fake queues are
2010 	 * imperceptible to, and cannot be used by, the upper application.
2012 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
2014 hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
2015 hw->cfg_max_queues = 0;
2019 hw->adapter_state = HNS3_NIC_CONFIGURING;
2020 ret = hns3_check_dev_conf(dev);
2024 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
2025 ret = hns3_setup_dcb(dev);
2030 /* When RSS is not configured, redirect the packet queue 0 */
2031 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
2032 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2033 rss_conf = conf->rx_adv_conf.rss_conf;
2034 hw->rss_dis_flag = false;
2035 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
2040 ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
2044 ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
2048 ret = hns3_dev_configure_vlan(dev);
2052 /* config hardware GRO */
2053 gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
2054 ret = hns3_config_gro(hw, gro_en);
2058 hns3_init_rx_ptype_tble(dev);
2059 hw->adapter_state = HNS3_NIC_CONFIGURED;
2064 hw->cfg_max_queues = 0;
2065 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
2066 hw->adapter_state = HNS3_NIC_INITIALIZED;
2072 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
2074 struct hns3_config_max_frm_size_cmd *req;
2075 struct hns3_cmd_desc desc;
2077 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
2079 req = (struct hns3_config_max_frm_size_cmd *)desc.data;
2080 req->max_frm_size = rte_cpu_to_le_16(new_mps);
2081 req->min_frm_size = RTE_ETHER_MIN_LEN;
2083 return hns3_cmd_send(hw, &desc, 1);
2087 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
2089 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2090 uint16_t original_mps = hns->pf.mps;
2094 ret = hns3_set_mac_mtu(hw, mps);
2096 hns3_err(hw, "failed to set mtu, ret = %d", ret);
2101 ret = hns3_buffer_alloc(hw);
2103 hns3_err(hw, "failed to allocate buffer, ret = %d", ret);
2110 err = hns3_set_mac_mtu(hw, original_mps);
2112 hns3_err(hw, "fail to rollback MTU, err = %d", err);
2115 hns->pf.mps = original_mps;
2121 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2123 struct hns3_adapter *hns = dev->data->dev_private;
2124 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
2125 struct hns3_hw *hw = &hns->hw;
2128 if (dev->data->dev_started) {
2129 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
2130 "before configuration", dev->data->port_id);
2134 rte_spinlock_lock(&hw->lock);
2135 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
2138 	 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
2139 	 * safely be assigned to a "uint16_t" variable.
2141 ret = hns3_config_mtu(hw, (uint16_t)frame_size);
2143 rte_spinlock_unlock(&hw->lock);
2144 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
2145 dev->data->port_id, mtu, ret);
2149 rte_spinlock_unlock(&hw->lock);
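/*
 * Translate the copper (PHY) and fiber bits in mac->supported_speed into
 * RTE_ETH_LINK_SPEED_* capability flags.
 */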
2155 hns3_get_copper_port_speed_capa(uint32_t supported_speed)
2157 uint32_t speed_capa = 0;
2159 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_HD_BIT)
2160 speed_capa |= RTE_ETH_LINK_SPEED_10M_HD;
2161 if (supported_speed & HNS3_PHY_LINK_SPEED_10M_BIT)
2162 speed_capa |= RTE_ETH_LINK_SPEED_10M;
2163 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_HD_BIT)
2164 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
2165 if (supported_speed & HNS3_PHY_LINK_SPEED_100M_BIT)
2166 speed_capa |= RTE_ETH_LINK_SPEED_100M;
2167 if (supported_speed & HNS3_PHY_LINK_SPEED_1000M_BIT)
2168 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2174 hns3_get_firber_port_speed_capa(uint32_t supported_speed)
2176 uint32_t speed_capa = 0;
2178 if (supported_speed & HNS3_FIBER_LINK_SPEED_1G_BIT)
2179 speed_capa |= RTE_ETH_LINK_SPEED_1G;
2180 if (supported_speed & HNS3_FIBER_LINK_SPEED_10G_BIT)
2181 speed_capa |= RTE_ETH_LINK_SPEED_10G;
2182 if (supported_speed & HNS3_FIBER_LINK_SPEED_25G_BIT)
2183 speed_capa |= RTE_ETH_LINK_SPEED_25G;
2184 if (supported_speed & HNS3_FIBER_LINK_SPEED_40G_BIT)
2185 speed_capa |= RTE_ETH_LINK_SPEED_40G;
2186 if (supported_speed & HNS3_FIBER_LINK_SPEED_50G_BIT)
2187 speed_capa |= RTE_ETH_LINK_SPEED_50G;
2188 if (supported_speed & HNS3_FIBER_LINK_SPEED_100G_BIT)
2189 speed_capa |= RTE_ETH_LINK_SPEED_100G;
2190 if (supported_speed & HNS3_FIBER_LINK_SPEED_200G_BIT)
2191 speed_capa |= RTE_ETH_LINK_SPEED_200G;
2197 hns3_get_speed_capa(struct hns3_hw *hw)
2199 struct hns3_mac *mac = &hw->mac;
2200 uint32_t speed_capa;
2202 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
2204 hns3_get_copper_port_speed_capa(mac->supported_speed);
2207 hns3_get_firber_port_speed_capa(mac->supported_speed);
2209 if (mac->support_autoneg == 0)
2210 speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
2216 hns3_update_port_link_info(struct rte_eth_dev *eth_dev)
2218 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2221 (void)hns3_update_link_status(hw);
2223 ret = hns3_update_link_info(eth_dev);
2225 hw->mac.link_status = RTE_ETH_LINK_DOWN;
2231 hns3_setup_linkstatus(struct rte_eth_dev *eth_dev,
2232 struct rte_eth_link *new_link)
2234 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2235 struct hns3_mac *mac = &hw->mac;
2237 switch (mac->link_speed) {
2238 case RTE_ETH_SPEED_NUM_10M:
2239 case RTE_ETH_SPEED_NUM_100M:
2240 case RTE_ETH_SPEED_NUM_1G:
2241 case RTE_ETH_SPEED_NUM_10G:
2242 case RTE_ETH_SPEED_NUM_25G:
2243 case RTE_ETH_SPEED_NUM_40G:
2244 case RTE_ETH_SPEED_NUM_50G:
2245 case RTE_ETH_SPEED_NUM_100G:
2246 case RTE_ETH_SPEED_NUM_200G:
2247 if (mac->link_status)
2248 new_link->link_speed = mac->link_speed;
2251 if (mac->link_status)
2252 new_link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
2256 if (!mac->link_status)
2257 new_link->link_speed = RTE_ETH_SPEED_NUM_NONE;
2259 new_link->link_duplex = mac->link_duplex;
2260 new_link->link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
2261 new_link->link_autoneg = mac->link_autoneg;
2265 hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
2267 #define HNS3_LINK_CHECK_INTERVAL 100 /* 100ms */
2268 #define HNS3_MAX_LINK_CHECK_TIMES 20 /* 2s (100ms * 20) in total */
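/*
 * With wait_to_complete set, the link state below is polled every
 * HNS3_LINK_CHECK_INTERVAL milliseconds for up to HNS3_MAX_LINK_CHECK_TIMES
 * attempts, i.e. roughly two seconds in total.
 */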
2270 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2271 uint32_t retry_cnt = HNS3_MAX_LINK_CHECK_TIMES;
2272 struct hns3_mac *mac = &hw->mac;
2273 struct rte_eth_link new_link;
2276 /* When port is stopped, report link down. */
2277 if (eth_dev->data->dev_started == 0) {
2278 new_link.link_autoneg = mac->link_autoneg;
2279 new_link.link_duplex = mac->link_duplex;
2280 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2281 new_link.link_status = RTE_ETH_LINK_DOWN;
2286 ret = hns3_update_port_link_info(eth_dev);
2288 hns3_err(hw, "failed to get port link info, ret = %d.",
2293 if (!wait_to_complete || mac->link_status == RTE_ETH_LINK_UP)
2296 rte_delay_ms(HNS3_LINK_CHECK_INTERVAL);
2297 } while (retry_cnt--);
2299 memset(&new_link, 0, sizeof(new_link));
2300 hns3_setup_linkstatus(eth_dev, &new_link);
2303 return rte_eth_linkstatus_set(eth_dev, &new_link);
2307 hns3_dev_set_link_up(struct rte_eth_dev *dev)
2309 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2313 * The "tx_pkt_burst" will be restored. But the secondary process does
2314 * not support the mechanism for notifying the primary process.
2316 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2317 hns3_err(hw, "secondary process does not support to set link up.");
2322 * If the device isn't started, the Rx/Tx functions are still disabled and
2323 * setting link up is not allowed. But it is probably better to return
2324 * success to reduce the impact on the upper layer.
2326 if (hw->adapter_state != HNS3_NIC_STARTED) {
2327 hns3_info(hw, "device isn't started, can't set link up.");
2331 if (!hw->set_link_down)
2334 rte_spinlock_lock(&hw->lock);
2335 ret = hns3_cfg_mac_mode(hw, true);
2337 rte_spinlock_unlock(&hw->lock);
2338 hns3_err(hw, "failed to set link up, ret = %d", ret);
2342 hw->set_link_down = false;
2343 hns3_start_tx_datapath(dev);
2344 rte_spinlock_unlock(&hw->lock);
2350 hns3_dev_set_link_down(struct rte_eth_dev *dev)
2352 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2356 * The "tx_pkt_burst" will be set to a dummy function. But the secondary
2357 * process does not support the mechanism for notifying the primary
2360 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2361 hns3_err(hw, "secondary process does not support to set link down.");
2366 * If the device isn't started or this API has already been called, the
2367 * link status is already down; return success.
2369 if (hw->adapter_state != HNS3_NIC_STARTED || hw->set_link_down)
2372 rte_spinlock_lock(&hw->lock);
2373 hns3_stop_tx_datapath(dev);
2374 ret = hns3_cfg_mac_mode(hw, false);
2376 hns3_start_tx_datapath(dev);
2377 rte_spinlock_unlock(&hw->lock);
2378 hns3_err(hw, "failed to set link down, ret = %d", ret);
2382 hw->set_link_down = true;
2383 rte_spinlock_unlock(&hw->lock);
2389 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2391 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2392 struct hns3_pf *pf = &hns->pf;
2394 if (!(status->pf_state & HNS3_PF_STATE_DONE))
2397 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2403 hns3_query_function_status(struct hns3_hw *hw)
2405 #define HNS3_QUERY_MAX_CNT 10
2406 #define HNS3_QUERY_SLEEP_MSEC 1
2407 struct hns3_func_status_cmd *req;
2408 struct hns3_cmd_desc desc;
2412 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2413 req = (struct hns3_func_status_cmd *)desc.data;
2416 ret = hns3_cmd_send(hw, &desc, 1);
2418 PMD_INIT_LOG(ERR, "query function status failed %d",
2423 /* Check whether pf reset is done */
2427 rte_delay_ms(HNS3_QUERY_SLEEP_MSEC);
2428 } while (timeout++ < HNS3_QUERY_MAX_CNT);
2430 return hns3_parse_func_status(hw, req);
2434 hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
2436 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2437 struct hns3_pf *pf = &hns->pf;
2439 if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
2441 * The total_tqps_num obtained from firmware is the maximum number of
2442 * tqps of this port, to be shared by the PF and its VFs. In most
2443 * cases the PF does not need that many tqps.
2444 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, coming from the config file,
2445 * is the maximum queue number the user assigns to the PF of this
2446 * port. Users can therefore tune the maximum queue number of the PF
2447 * to their own application scenario, which is more flexible to use.
2448 * In addition, a lot of memory can be saved because the queue
2449 * statistics room is allocated according to the actual number of
2450 * queues required. For network engines with a revision_id greater
2451 * than 0x30, the maximum queue number of the PF is assigned by the
2452 * config file.
2454 if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
2455 hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
2456 "must be greater than 0.",
2457 RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
2461 hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
2462 hw->total_tqps_num);
2465 * Due to the limitation on the number of PF interrupts
2466 * available, the maximum queue number assigned to PF on
2467 * the network engine with revision_id 0x21 is 64.
2469 hw->tqps_num = RTE_MIN(hw->total_tqps_num,
2470 HNS3_MAX_TQP_NUM_HIP08_PF);
2477 hns3_query_pf_resource(struct hns3_hw *hw)
2479 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2480 struct hns3_pf *pf = &hns->pf;
2481 struct hns3_pf_res_cmd *req;
2482 struct hns3_cmd_desc desc;
2485 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2486 ret = hns3_cmd_send(hw, &desc, 1);
2488 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2492 req = (struct hns3_pf_res_cmd *)desc.data;
2493 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
2494 rte_le_to_cpu_16(req->ext_tqp_num);
2495 ret = hns3_get_pf_max_tqp_num(hw);
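/*
 * Buffer sizes reported by firmware are in hardware buffer units; they are
 * shifted left by HNS3_BUF_UNIT_S below to convert them to bytes.
 */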
2499 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
2500 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
2502 if (req->tx_buf_size)
2504 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2506 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2508 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2510 if (req->dv_buf_size)
2512 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2514 pf->dv_buf_size = HNS3_DEFAULT_DV;
2516 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2519 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
2520 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
2526 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
2528 struct hns3_cfg_param_cmd *req;
2529 uint64_t mac_addr_tmp_high;
2530 uint8_t ext_rss_size_max;
2531 uint64_t mac_addr_tmp;
2534 req = (struct hns3_cfg_param_cmd *)desc[0].data;
2536 /* get the configuration */
2537 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2538 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
2539 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2540 HNS3_CFG_TQP_DESC_N_M,
2541 HNS3_CFG_TQP_DESC_N_S);
2543 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2544 HNS3_CFG_PHY_ADDR_M,
2545 HNS3_CFG_PHY_ADDR_S);
2546 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2547 HNS3_CFG_MEDIA_TP_M,
2548 HNS3_CFG_MEDIA_TP_S);
2549 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2550 HNS3_CFG_RX_BUF_LEN_M,
2551 HNS3_CFG_RX_BUF_LEN_S);
2552 /* get mac address */
2553 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
2554 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2555 HNS3_CFG_MAC_ADDR_H_M,
2556 HNS3_CFG_MAC_ADDR_H_S);
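/*
 * param[2] holds the low 32 bits of the MAC address and the field extracted
 * from param[3] holds the upper 16 bits; the two-step shift below is
 * equivalent to "<< 32" and merges them into one 48-bit value.
 */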
2558 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
2560 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2561 HNS3_CFG_DEFAULT_SPEED_M,
2562 HNS3_CFG_DEFAULT_SPEED_S);
2563 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2564 HNS3_CFG_RSS_SIZE_M,
2565 HNS3_CFG_RSS_SIZE_S);
2567 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
2568 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
2570 req = (struct hns3_cfg_param_cmd *)desc[1].data;
2571 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
2573 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2574 HNS3_CFG_SPEED_ABILITY_M,
2575 HNS3_CFG_SPEED_ABILITY_S);
2576 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2577 HNS3_CFG_UMV_TBL_SPACE_M,
2578 HNS3_CFG_UMV_TBL_SPACE_S);
2579 if (!cfg->umv_space)
2580 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
2582 ext_rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[2]),
2583 HNS3_CFG_EXT_RSS_SIZE_M,
2584 HNS3_CFG_EXT_RSS_SIZE_S);
2586 * The ext_rss_size_max field obtained from firmware is an exponent of 2
2587 * rather than a value read out directly, which leaves more room for
2588 * future changes and expansion. If this field is non-zero, the hns3 PF
2589 * PMD uses 2^ext_rss_size_max as rss_size_max under one TC. Devices whose
2590 * revision id is greater than or equal to PCI_REVISION_ID_HIP09_A obtain
2591 * the maximum number of queues supported under a TC through this field.
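* For example, an ext_rss_size_max of 6 would yield rss_size_max = 1 << 6 = 64.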
2593 if (ext_rss_size_max)
2594 cfg->rss_size_max = 1U << ext_rss_size_max;
2597 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash
2598 * @hw: pointer to struct hns3_hw
2599 * @hcfg: the config structure to be filled in
2602 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2604 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2605 struct hns3_cfg_param_cmd *req;
2610 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2612 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2613 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2615 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2616 i * HNS3_CFG_RD_LEN_BYTES);
2617 /* Len should be divided by 4 when sent to hardware */
2618 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2619 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2620 req->offset = rte_cpu_to_le_32(offset);
2623 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2625 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2629 hns3_parse_cfg(hcfg, desc);
2635 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2637 switch (speed_cmd) {
2638 case HNS3_CFG_SPEED_10M:
2639 *speed = RTE_ETH_SPEED_NUM_10M;
2641 case HNS3_CFG_SPEED_100M:
2642 *speed = RTE_ETH_SPEED_NUM_100M;
2644 case HNS3_CFG_SPEED_1G:
2645 *speed = RTE_ETH_SPEED_NUM_1G;
2647 case HNS3_CFG_SPEED_10G:
2648 *speed = RTE_ETH_SPEED_NUM_10G;
2650 case HNS3_CFG_SPEED_25G:
2651 *speed = RTE_ETH_SPEED_NUM_25G;
2653 case HNS3_CFG_SPEED_40G:
2654 *speed = RTE_ETH_SPEED_NUM_40G;
2656 case HNS3_CFG_SPEED_50G:
2657 *speed = RTE_ETH_SPEED_NUM_50G;
2659 case HNS3_CFG_SPEED_100G:
2660 *speed = RTE_ETH_SPEED_NUM_100G;
2662 case HNS3_CFG_SPEED_200G:
2663 *speed = RTE_ETH_SPEED_NUM_200G;
2673 hns3_set_default_dev_specifications(struct hns3_hw *hw)
2675 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
2676 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
2677 hw->rss_key_size = HNS3_RSS_KEY_SIZE;
2678 hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
2679 hw->intr.int_ql_max = HNS3_INTR_QL_NONE;
2683 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
2685 struct hns3_dev_specs_0_cmd *req0;
2687 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
2689 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
2690 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
2691 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
2692 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
2693 hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
2697 hns3_check_dev_specifications(struct hns3_hw *hw)
2699 if (hw->rss_ind_tbl_size == 0 ||
2700 hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
2701 hns3_err(hw, "the size of hash lookup table configured (%u)"
2702 " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
2703 HNS3_RSS_IND_TBL_SIZE_MAX);
2711 hns3_query_dev_specifications(struct hns3_hw *hw)
2713 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
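/*
 * All descriptors except the last one carry HNS3_CMD_FLAG_NEXT so that
 * firmware treats the sequence as a single multi-BD query.
 */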
2717 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2718 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
2720 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2722 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
2724 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
2728 hns3_parse_dev_specifications(hw, desc);
2730 return hns3_check_dev_specifications(hw);
2734 hns3_get_capability(struct hns3_hw *hw)
2736 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2737 struct rte_pci_device *pci_dev;
2738 struct hns3_pf *pf = &hns->pf;
2739 struct rte_eth_dev *eth_dev;
2744 eth_dev = &rte_eth_devices[hw->data->port_id];
2745 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2746 device_id = pci_dev->id.device_id;
2748 if (device_id == HNS3_DEV_ID_25GE_RDMA ||
2749 device_id == HNS3_DEV_ID_50GE_RDMA ||
2750 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
2751 device_id == HNS3_DEV_ID_200G_RDMA)
2752 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
2754 /* Get PCI revision id */
2755 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
2756 HNS3_PCI_REVISION_ID);
2757 if (ret != HNS3_PCI_REVISION_ID_LEN) {
2758 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
2762 hw->revision = revision;
2764 if (revision < PCI_REVISION_ID_HIP09_A) {
2765 hns3_set_default_dev_specifications(hw);
2766 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
2767 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
2768 hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
2769 hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
2770 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
2771 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
2772 pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
2773 hw->rss_info.ipv6_sctp_offload_supported = false;
2774 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
2775 pf->support_multi_tc_pause = false;
2779 ret = hns3_query_dev_specifications(hw);
2782 "failed to query dev specifications, ret = %d",
2787 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
2788 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
2789 hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
2790 hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
2791 hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
2792 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
2793 pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
2794 hw->rss_info.ipv6_sctp_offload_supported = true;
2795 hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
2796 pf->support_multi_tc_pause = true;
2802 hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
2806 switch (media_type) {
2807 case HNS3_MEDIA_TYPE_COPPER:
2808 if (!hns3_dev_get_support(hw, COPPER)) {
2810 "Media type is copper, not supported.");
2816 case HNS3_MEDIA_TYPE_FIBER:
2819 case HNS3_MEDIA_TYPE_BACKPLANE:
2820 PMD_INIT_LOG(ERR, "Media type is Backplane, not supported.");
2824 PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type);
2833 hns3_get_board_configuration(struct hns3_hw *hw)
2835 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2836 struct hns3_pf *pf = &hns->pf;
2837 struct hns3_cfg cfg;
2840 ret = hns3_get_board_cfg(hw, &cfg);
2842 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
2846 ret = hns3_check_media_type(hw, cfg.media_type);
2850 hw->mac.media_type = cfg.media_type;
2851 hw->rss_size_max = cfg.rss_size_max;
2852 hw->rss_dis_flag = false;
2853 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
2854 hw->mac.phy_addr = cfg.phy_addr;
2855 hw->num_tx_desc = cfg.tqp_desc_num;
2856 hw->num_rx_desc = cfg.tqp_desc_num;
2857 hw->dcb_info.num_pg = 1;
2858 hw->dcb_info.hw_pfc_map = 0;
2860 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
2862 PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
2863 cfg.default_speed, ret);
2867 pf->tc_max = cfg.tc_num;
2868 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
2869 PMD_INIT_LOG(WARNING,
2870 "Get TC num(%u) from flash, set TC num to 1",
2875 /* Dev does not support DCB */
2876 if (!hns3_dev_get_support(hw, DCB)) {
2880 pf->pfc_max = pf->tc_max;
2882 hw->dcb_info.num_tc = 1;
2883 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
2884 hw->tqps_num / hw->dcb_info.num_tc);
2885 hns3_set_bit(hw->hw_tc_map, 0, 1);
2886 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
2888 pf->wanted_umv_size = cfg.umv_space;
2894 hns3_get_configuration(struct hns3_hw *hw)
2898 ret = hns3_query_function_status(hw);
2900 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
2904 /* Get device capability */
2905 ret = hns3_get_capability(hw);
2907 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
2911 /* Get pf resource */
2912 ret = hns3_query_pf_resource(hw);
2914 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
2918 ret = hns3_get_board_configuration(hw);
2920 PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
2924 ret = hns3_query_dev_fec_info(hw);
2927 "failed to query FEC information, ret = %d", ret);
2933 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
2934 uint16_t tqp_vid, bool is_pf)
2936 struct hns3_tqp_map_cmd *req;
2937 struct hns3_cmd_desc desc;
2940 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
2942 req = (struct hns3_tqp_map_cmd *)desc.data;
2943 req->tqp_id = rte_cpu_to_le_16(tqp_pid);
2944 req->tqp_vf = func_id;
2945 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
2947 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
2948 req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
2950 ret = hns3_cmd_send(hw, &desc, 1);
2952 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
2958 hns3_map_tqp(struct hns3_hw *hw)
2964 * In the current version, VF is not supported when the PF is driven by the
2965 * DPDK driver, so we assign all of the tqps allocated to this port
2968 for (i = 0; i < hw->total_tqps_num; i++) {
2969 ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
2978 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
2980 struct hns3_config_mac_speed_dup_cmd *req;
2981 struct hns3_cmd_desc desc;
2984 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
2986 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
2988 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
2991 case RTE_ETH_SPEED_NUM_10M:
2992 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2993 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
2995 case RTE_ETH_SPEED_NUM_100M:
2996 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2997 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
2999 case RTE_ETH_SPEED_NUM_1G:
3000 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3001 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
3003 case RTE_ETH_SPEED_NUM_10G:
3004 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3005 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
3007 case RTE_ETH_SPEED_NUM_25G:
3008 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3009 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
3011 case RTE_ETH_SPEED_NUM_40G:
3012 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3013 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
3015 case RTE_ETH_SPEED_NUM_50G:
3016 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3017 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
3019 case RTE_ETH_SPEED_NUM_100G:
3020 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3021 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
3023 case RTE_ETH_SPEED_NUM_200G:
3024 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3025 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
3028 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
3032 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
3034 ret = hns3_cmd_send(hw, &desc, 1);
3036 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
3042 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3044 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3045 struct hns3_pf *pf = &hns->pf;
3046 struct hns3_priv_buf *priv;
3047 uint32_t i, total_size;
3049 total_size = pf->pkt_buf_size;
3051 /* alloc tx buffer for all enabled tc */
3052 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3053 priv = &buf_alloc->priv_buf[i];
3055 if (hw->hw_tc_map & BIT(i)) {
3056 if (total_size < pf->tx_buf_size)
3059 priv->tx_buf_size = pf->tx_buf_size;
3061 priv->tx_buf_size = 0;
3063 total_size -= priv->tx_buf_size;
3070 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3072 /* TX buffer size is in units of 128 bytes */
3073 #define HNS3_BUF_SIZE_UNIT_SHIFT 7
3074 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15)
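/*
 * Each per-TC entry below carries the Tx buffer size in 128-byte units
 * with HNS3_BUF_SIZE_UPDATE_EN_MSK set so that hardware applies the new
 * value.
 */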
3075 struct hns3_tx_buff_alloc_cmd *req;
3076 struct hns3_cmd_desc desc;
3081 req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
3083 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
3084 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3085 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
3087 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
3088 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
3089 HNS3_BUF_SIZE_UPDATE_EN_MSK);
3092 ret = hns3_cmd_send(hw, &desc, 1);
3094 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
3100 hns3_get_tc_num(struct hns3_hw *hw)
3105 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3106 if (hw->hw_tc_map & BIT(i))
3112 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3114 struct hns3_priv_buf *priv;
3115 uint32_t rx_priv = 0;
3118 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3119 priv = &buf_alloc->priv_buf[i];
3121 rx_priv += priv->buf_size;
3127 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3129 uint32_t total_tx_size = 0;
3132 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3133 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
3135 return total_tx_size;
3138 /* Get the number of pfc enabled TCs, which have private buffer */
3140 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3142 struct hns3_priv_buf *priv;
3146 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3147 priv = &buf_alloc->priv_buf[i];
3148 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3155 /* Get the number of pfc disabled TCs, which have private buffer */
3157 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
3158 struct hns3_pkt_buf_alloc *buf_alloc)
3160 struct hns3_priv_buf *priv;
3164 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3165 priv = &buf_alloc->priv_buf[i];
3166 if (hw->hw_tc_map & BIT(i) &&
3167 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3175 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
3178 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
3179 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3180 struct hns3_pf *pf = &hns->pf;
3181 uint32_t shared_buf, aligned_mps;
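/*
 * Check that the total Rx buffer (rx_all) can hold every TC's private
 * buffer plus a shared buffer of at least shared_std bytes, and derive
 * the shared-buffer watermarks and per-TC thresholds from what remains.
 */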
3186 tc_num = hns3_get_tc_num(hw);
3187 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3189 if (hns3_dev_get_support(hw, DCB))
3190 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
3193 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
3196 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
3197 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
3198 HNS3_BUF_SIZE_UNIT);
3200 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
3201 if (rx_all < rx_priv + shared_std)
3204 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
3205 buf_alloc->s_buf.buf_size = shared_buf;
3206 if (hns3_dev_get_support(hw, DCB)) {
3207 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
3208 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
3209 - roundup(aligned_mps / HNS3_BUF_DIV_BY,
3210 HNS3_BUF_SIZE_UNIT);
3212 buf_alloc->s_buf.self.high =
3213 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3214 buf_alloc->s_buf.self.low = aligned_mps;
3217 if (hns3_dev_get_support(hw, DCB)) {
3218 hi_thrd = shared_buf - pf->dv_buf_size;
3220 if (tc_num <= NEED_RESERVE_TC_NUM)
3221 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT /
3225 hi_thrd = hi_thrd / tc_num;
3227 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
3228 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
3229 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
3231 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3232 lo_thrd = aligned_mps;
3235 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3236 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
3237 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
3244 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
3245 struct hns3_pkt_buf_alloc *buf_alloc)
3247 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3248 struct hns3_pf *pf = &hns->pf;
3249 struct hns3_priv_buf *priv;
3250 uint32_t aligned_mps;
3254 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3255 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3257 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3258 priv = &buf_alloc->priv_buf[i];
3265 if (!(hw->hw_tc_map & BIT(i)))
3269 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
3270 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
3271 priv->wl.high = roundup(priv->wl.low + aligned_mps,
3272 HNS3_BUF_SIZE_UNIT);
3275 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
3279 priv->buf_size = priv->wl.high + pf->dv_buf_size;
3282 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3286 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
3287 struct hns3_pkt_buf_alloc *buf_alloc)
3289 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3290 struct hns3_pf *pf = &hns->pf;
3291 struct hns3_priv_buf *priv;
3292 int no_pfc_priv_num;
3297 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3298 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
3300 /* Let the last one be cleared first */
3301 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3302 priv = &buf_alloc->priv_buf[i];
3303 mask = BIT((uint8_t)i);
3304 if (hw->hw_tc_map & mask &&
3305 !(hw->dcb_info.hw_pfc_map & mask)) {
3306 /* Clear the no pfc TC private buffer */
3314 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3315 no_pfc_priv_num == 0)
3319 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3323 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
3324 struct hns3_pkt_buf_alloc *buf_alloc)
3326 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3327 struct hns3_pf *pf = &hns->pf;
3328 struct hns3_priv_buf *priv;
3334 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3335 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
3337 /* Let the last one be cleared first */
3338 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3339 priv = &buf_alloc->priv_buf[i];
3340 mask = BIT((uint8_t)i);
3341 if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
3342 /* Reduce the number of pfc TC with private buffer */
3349 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3354 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3358 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
3359 struct hns3_pkt_buf_alloc *buf_alloc)
3361 #define COMPENSATE_BUFFER 0x3C00
3362 #define COMPENSATE_HALF_MPS_NUM 5
3363 #define PRIV_WL_GAP 0x1800
3364 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3365 struct hns3_pf *pf = &hns->pf;
3366 uint32_t tc_num = hns3_get_tc_num(hw);
3367 uint32_t half_mps = pf->mps >> 1;
3368 struct hns3_priv_buf *priv;
3369 uint32_t min_rx_priv;
3373 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3375 rx_priv = rx_priv / tc_num;
3377 if (tc_num <= NEED_RESERVE_TC_NUM)
3378 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
3381 * Minimum value of private buffer in rx direction (min_rx_priv) is
3382 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
3383 * buffer if rx_priv is greater than min_rx_priv.
3385 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
3386 COMPENSATE_HALF_MPS_NUM * half_mps;
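/*
 * COMPENSATE_BUFFER (0x3C00 bytes) is the 15KB term and
 * COMPENSATE_HALF_MPS_NUM half-MPS units make up the 2.5 * MPS term of
 * the formula above.
 */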
3387 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
3388 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
3389 if (rx_priv < min_rx_priv)
3392 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3393 priv = &buf_alloc->priv_buf[i];
3399 if (!(hw->hw_tc_map & BIT(i)))
3403 priv->buf_size = rx_priv;
3404 priv->wl.high = rx_priv - pf->dv_buf_size;
3405 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
3408 buf_alloc->s_buf.buf_size = 0;
3414 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
3415 * @hw: pointer to struct hns3_hw
3416 * @buf_alloc: pointer to buffer calculation data
3417 * @return: 0: calculate successful, negative: fail
3420 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3422 /* When DCB is not supported, rx private buffer is not allocated. */
3423 if (!hns3_dev_get_support(hw, DCB)) {
3424 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3425 struct hns3_pf *pf = &hns->pf;
3426 uint32_t rx_all = pf->pkt_buf_size;
3428 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
3429 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
3436 * Try to allocate private packet buffers for all TCs without a shared
3439 if (hns3_only_alloc_priv_buff(hw, buf_alloc))
3443 * Try to allocate private packet buffers for all TCs with a shared
3446 if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
3450 * The number of enabled ports, TCs and no_drop TCs differs between
3451 * application scenarios. To obtain better performance, software can
3452 * allocate the buffer size and configure the waterline by trying to
3453 * decrease the private buffer size in the following order: waterline
3454 * of valid tc, pfc disabled tc, pfc
3457 if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
3460 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
3463 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
3470 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3472 struct hns3_rx_priv_buff_cmd *req;
3473 struct hns3_cmd_desc desc;
3478 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
3479 req = (struct hns3_rx_priv_buff_cmd *)desc.data;
3481 /* Alloc private buffer TCs */
3482 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3483 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
3486 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
3487 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
3490 buf_size = buf_alloc->s_buf.buf_size;
3491 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
3492 (1 << HNS3_TC0_PRI_BUF_EN_B));
3494 ret = hns3_cmd_send(hw, &desc, 1);
3496 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
3502 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3504 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
3505 struct hns3_rx_priv_wl_buf *req;
3506 struct hns3_priv_buf *priv;
3507 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
3511 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
3512 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
3514 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
3516 /* The first descriptor sets the NEXT bit to 1 */
3518 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3520 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3522 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3523 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
3525 priv = &buf_alloc->priv_buf[idx];
3526 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
3528 req->tc_wl[j].high |=
3529 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3530 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
3532 req->tc_wl[j].low |=
3533 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3537 /* Send 2 descriptors at one time */
3538 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
3540 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
3546 hns3_common_thrd_config(struct hns3_hw *hw,
3547 struct hns3_pkt_buf_alloc *buf_alloc)
3549 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3550 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3551 struct hns3_rx_com_thrd *req;
3552 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3553 struct hns3_tc_thrd *tc;
3558 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3559 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3561 req = (struct hns3_rx_com_thrd *)&desc[i].data;
3563 /* The first descriptor sets the NEXT bit to 1 */
3565 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3567 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3569 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3570 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3571 tc = &s_buf->tc_thrd[tc_idx];
3573 req->com_thrd[j].high =
3574 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3575 req->com_thrd[j].high |=
3576 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3577 req->com_thrd[j].low =
3578 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3579 req->com_thrd[j].low |=
3580 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3584 /* Send 2 descriptors at one time */
3585 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3587 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3593 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3595 struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3596 struct hns3_rx_com_wl *req;
3597 struct hns3_cmd_desc desc;
3600 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3602 req = (struct hns3_rx_com_wl *)desc.data;
3603 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3604 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3606 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3607 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3609 ret = hns3_cmd_send(hw, &desc, 1);
3611 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3617 hns3_buffer_alloc(struct hns3_hw *hw)
3619 struct hns3_pkt_buf_alloc pkt_buf;
3622 memset(&pkt_buf, 0, sizeof(pkt_buf));
3623 ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3626 "could not calc tx buffer size for all TCs %d",
3631 ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3633 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3637 ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3640 "could not calc rx priv buffer size for all TCs %d",
3645 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3647 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3651 if (hns3_dev_get_support(hw, DCB)) {
3652 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3655 "could not configure rx private waterline %d",
3660 ret = hns3_common_thrd_config(hw, &pkt_buf);
3663 "could not configure common threshold %d",
3669 ret = hns3_common_wl_config(hw, &pkt_buf);
3671 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3678 hns3_mac_init(struct hns3_hw *hw)
3680 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3681 struct hns3_mac *mac = &hw->mac;
3682 struct hns3_pf *pf = &hns->pf;
3685 pf->support_sfp_query = true;
3686 mac->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3687 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3689 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3693 mac->link_status = RTE_ETH_LINK_DOWN;
3695 return hns3_config_mtu(hw, pf->mps);
3699 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3701 #define HNS3_ETHERTYPE_SUCCESS_ADD 0
3702 #define HNS3_ETHERTYPE_ALREADY_ADD 1
3703 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2
3704 #define HNS3_ETHERTYPE_KEY_CONFLICT 3
3709 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
3714 switch (resp_code) {
3715 case HNS3_ETHERTYPE_SUCCESS_ADD:
3716 case HNS3_ETHERTYPE_ALREADY_ADD:
3719 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3721 "add mac ethertype failed for manager table overflow.");
3722 return_status = -EIO;
3724 case HNS3_ETHERTYPE_KEY_CONFLICT:
3725 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3726 return_status = -EIO;
3730 "add mac ethertype failed for undefined, code=%u.",
3732 return_status = -EIO;
3736 return return_status;
3740 hns3_add_mgr_tbl(struct hns3_hw *hw,
3741 const struct hns3_mac_mgr_tbl_entry_cmd *req)
3743 struct hns3_cmd_desc desc;
3748 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3749 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3751 ret = hns3_cmd_send(hw, &desc, 1);
3754 "add mac ethertype failed for cmd_send, ret =%d.",
3759 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3760 retval = rte_le_to_cpu_16(desc.retval);
3762 return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3766 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3767 int *table_item_num)
3769 struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3772 * In current version, we add one item in management table as below:
3773 * 0x0180C200000E -- LLDP MC address
3776 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3777 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3778 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3779 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
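/*
 * The hi32/lo16 halves above together encode the LLDP multicast
 * destination MAC 01:80:c2:00:00:0e in network byte order.
 */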
3780 tbl->i_port_bitmap = 0x1;
3781 *table_item_num = 1;
3785 hns3_init_mgr_tbl(struct hns3_hw *hw)
3787 #define HNS_MAC_MGR_TBL_MAX_SIZE 16
3788 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3793 memset(mgr_table, 0, sizeof(mgr_table));
3794 hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3795 for (i = 0; i < table_item_num; i++) {
3796 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3798 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3808 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3809 bool en_mc, bool en_bc, int vport_id)
3814 memset(param, 0, sizeof(struct hns3_promisc_param));
3816 param->enable = HNS3_PROMISC_EN_UC;
3818 param->enable |= HNS3_PROMISC_EN_MC;
3820 param->enable |= HNS3_PROMISC_EN_BC;
3821 param->vf_id = vport_id;
3825 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3827 struct hns3_promisc_cfg_cmd *req;
3828 struct hns3_cmd_desc desc;
3831 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3833 req = (struct hns3_promisc_cfg_cmd *)desc.data;
3834 req->vf_id = param->vf_id;
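/*
 * param->enable carries the unicast/multicast/broadcast promiscuous bits;
 * promiscuous handling is enabled for both Tx and Rx directions.
 */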
3835 req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3836 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3838 ret = hns3_cmd_send(hw, &desc, 1);
3840 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
3846 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3848 struct hns3_promisc_param param;
3849 bool en_bc_pmc = true;
3853 * In the current version, VF is not supported when the PF is driven by the
3854 * DPDK driver; only the parameters of the PF vport need to be configured.
3856 vf_id = HNS3_PF_FUNC_ID;
3858 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3859 return hns3_cmd_set_promisc_mode(hw, ¶m);
3863 hns3_promisc_init(struct hns3_hw *hw)
3865 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3866 struct hns3_pf *pf = &hns->pf;
3867 struct hns3_promisc_param param;
3871 ret = hns3_set_promisc_mode(hw, false, false);
3873 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
3878 * In the current version VFs are not supported when the PF is driven by the
3879 * DPDK driver. After the PF has been taken over by DPDK, the original VFs
3880 * become invalid, so entry residues are possible. It should
3881 * clear the VFs' promisc mode to avoid unnecessary bandwidth usage
3884 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
3885 hns3_promisc_param_init(¶m, false, false, false, func_id);
3886 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
3888 PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
3889 " ret = %d", func_id, ret);
3898 hns3_promisc_uninit(struct hns3_hw *hw)
3900 struct hns3_promisc_param param;
3904 func_id = HNS3_PF_FUNC_ID;
3907 * In the current version VFs are not supported when the PF is driven by
3908 * the DPDK driver, and the VFs' promisc mode status has been cleared
3909 * during init and will not change. So just clear the PF's promisc
3910 * mode status during uninit.
3912 hns3_promisc_param_init(¶m, false, false, false, func_id);
3913 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
3915 PMD_INIT_LOG(ERR, "failed to clear promisc status during"
3916 " uninit, ret = %d", ret);
3920 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
3922 bool allmulti = dev->data->all_multicast ? true : false;
3923 struct hns3_adapter *hns = dev->data->dev_private;
3924 struct hns3_hw *hw = &hns->hw;
3929 rte_spinlock_lock(&hw->lock);
3930 ret = hns3_set_promisc_mode(hw, true, true);
3932 rte_spinlock_unlock(&hw->lock);
3933 hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
3939 * When promiscuous mode is enabled, disable the vlan filter so that
3940 * all packets in the receiving direction are let in.
3942 offloads = dev->data->dev_conf.rxmode.offloads;
3943 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
3944 ret = hns3_enable_vlan_filter(hns, false);
3946 hns3_err(hw, "failed to enable promiscuous mode due to "
3947 "failure to disable vlan filter, ret = %d",
3949 err = hns3_set_promisc_mode(hw, false, allmulti);
3951 hns3_err(hw, "failed to restore promiscuous "
3952 "status after disable vlan filter "
3953 "failed during enabling promiscuous "
3954 "mode, ret = %d", ret);
3958 rte_spinlock_unlock(&hw->lock);
3964 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
3966 bool allmulti = dev->data->all_multicast ? true : false;
3967 struct hns3_adapter *hns = dev->data->dev_private;
3968 struct hns3_hw *hw = &hns->hw;
3973 /* If now in all_multicast mode, must remain in all_multicast mode. */
3974 rte_spinlock_lock(&hw->lock);
3975 ret = hns3_set_promisc_mode(hw, false, allmulti);
3977 rte_spinlock_unlock(&hw->lock);
3978 hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
3982 /* When promiscuous mode is disabled, restore the vlan filter status. */
3983 offloads = dev->data->dev_conf.rxmode.offloads;
3984 if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
3985 ret = hns3_enable_vlan_filter(hns, true);
3987 hns3_err(hw, "failed to disable promiscuous mode due to"
3988 " failure to restore vlan filter, ret = %d",
3990 err = hns3_set_promisc_mode(hw, true, true);
3992 hns3_err(hw, "failed to restore promiscuous "
3993 "status after enabling vlan filter "
3994 "failed during disabling promiscuous "
3995 "mode, ret = %d", ret);
3998 rte_spinlock_unlock(&hw->lock);
4004 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
4006 struct hns3_adapter *hns = dev->data->dev_private;
4007 struct hns3_hw *hw = &hns->hw;
4010 if (dev->data->promiscuous)
4013 rte_spinlock_lock(&hw->lock);
4014 ret = hns3_set_promisc_mode(hw, false, true);
4015 rte_spinlock_unlock(&hw->lock);
4017 hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
4024 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
4026 struct hns3_adapter *hns = dev->data->dev_private;
4027 struct hns3_hw *hw = &hns->hw;
4030 /* If now in promiscuous mode, must remain in all_multicast mode. */
4031 if (dev->data->promiscuous)
4034 rte_spinlock_lock(&hw->lock);
4035 ret = hns3_set_promisc_mode(hw, false, false);
4036 rte_spinlock_unlock(&hw->lock);
4038 hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4045 hns3_dev_promisc_restore(struct hns3_adapter *hns)
4047 struct hns3_hw *hw = &hns->hw;
4048 bool allmulti = hw->data->all_multicast ? true : false;
4051 if (hw->data->promiscuous) {
4052 ret = hns3_set_promisc_mode(hw, true, true);
4054 hns3_err(hw, "failed to restore promiscuous mode, "
4059 ret = hns3_set_promisc_mode(hw, false, allmulti);
4061 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4067 hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info)
4069 struct hns3_sfp_info_cmd *resp;
4070 struct hns3_cmd_desc desc;
4073 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
4074 resp = (struct hns3_sfp_info_cmd *)desc.data;
4075 resp->query_type = HNS3_ACTIVE_QUERY;
4077 ret = hns3_cmd_send(hw, &desc, 1);
4078 if (ret == -EOPNOTSUPP) {
4079 hns3_warn(hw, "firmware does not support getting SFP info,"
4083 hns3_err(hw, "get sfp info failed, ret = %d.", ret);
4088 * In some cases the speed of the MAC obtained from firmware may be 0;
4089 * it shouldn't be set to mac->speed.
4091 if (!rte_le_to_cpu_32(resp->sfp_speed))
4094 mac_info->link_speed = rte_le_to_cpu_32(resp->sfp_speed);
4096 * If resp->supported_speed is 0, it means the firmware is an old
4097 * version; do not update these params.
4099 if (resp->supported_speed) {
4100 mac_info->query_type = HNS3_ACTIVE_QUERY;
4101 mac_info->supported_speed =
4102 rte_le_to_cpu_32(resp->supported_speed);
4103 mac_info->support_autoneg = resp->autoneg_ability;
4104 mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED
4105 : RTE_ETH_LINK_AUTONEG;
4107 mac_info->query_type = HNS3_DEFAULT_QUERY;
4114 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4116 if (!(speed == RTE_ETH_SPEED_NUM_10M || speed == RTE_ETH_SPEED_NUM_100M))
4117 duplex = RTE_ETH_LINK_FULL_DUPLEX;
4123 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4125 struct hns3_mac *mac = &hw->mac;
4128 duplex = hns3_check_speed_dup(duplex, speed);
4129 if (mac->link_speed == speed && mac->link_duplex == duplex)
4132 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4136 ret = hns3_port_shaper_update(hw, speed);
4140 mac->link_speed = speed;
4141 mac->link_duplex = duplex;
4147 hns3_update_fiber_link_info(struct hns3_hw *hw)
4149 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
4150 struct hns3_mac *mac = &hw->mac;
4151 struct hns3_mac mac_info;
4154 /* If the firmware does not support getting the SFP/qSFP speed, return directly */
4155 if (!pf->support_sfp_query)
4158 memset(&mac_info, 0, sizeof(struct hns3_mac));
4159 ret = hns3_get_sfp_info(hw, &mac_info);
4160 if (ret == -EOPNOTSUPP) {
4161 pf->support_sfp_query = false;
4166 /* Do nothing if no SFP */
4167 if (mac_info.link_speed == RTE_ETH_SPEED_NUM_NONE)
4171 * If query_type is HNS3_ACTIVE_QUERY, there is no need
4172 * to reconfigure the MAC speed. Otherwise, the current
4173 * firmware only supports obtaining the SFP speed, and the
4174 * MAC speed needs to be reconfigured.
4176 mac->query_type = mac_info.query_type;
4177 if (mac->query_type == HNS3_ACTIVE_QUERY) {
4178 if (mac_info.link_speed != mac->link_speed) {
4179 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4184 mac->link_speed = mac_info.link_speed;
4185 mac->supported_speed = mac_info.supported_speed;
4186 mac->support_autoneg = mac_info.support_autoneg;
4187 mac->link_autoneg = mac_info.link_autoneg;
4192 /* Config full duplex for SFP */
4193 return hns3_cfg_mac_speed_dup(hw, mac_info.link_speed,
4194 RTE_ETH_LINK_FULL_DUPLEX);
4198 hns3_parse_copper_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac)
4200 #define HNS3_PHY_SUPPORTED_SPEED_MASK 0x2f
4202 struct hns3_phy_params_bd0_cmd *req;
4205 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4206 mac->link_speed = rte_le_to_cpu_32(req->speed);
4207 mac->link_duplex = hns3_get_bit(req->duplex,
4208 HNS3_PHY_DUPLEX_CFG_B);
4209 mac->link_autoneg = hns3_get_bit(req->autoneg,
4210 HNS3_PHY_AUTONEG_CFG_B);
4211 mac->advertising = rte_le_to_cpu_32(req->advertising);
4212 mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising);
4213 supported = rte_le_to_cpu_32(req->supported);
4214 mac->supported_speed = supported & HNS3_PHY_SUPPORTED_SPEED_MASK;
4215 mac->support_autoneg = !!(supported & HNS3_PHY_LINK_MODE_AUTONEG_BIT);
4219 hns3_get_copper_phy_params(struct hns3_hw *hw, struct hns3_mac *mac)
4221 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4225 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4226 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4228 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4230 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true);
4232 ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4234 hns3_err(hw, "get phy parameters failed, ret = %d.", ret);
4238 hns3_parse_copper_phy_params(desc, mac);
4244 hns3_update_copper_link_info(struct hns3_hw *hw)
4246 struct hns3_mac *mac = &hw->mac;
4247 struct hns3_mac mac_info;
4250 memset(&mac_info, 0, sizeof(struct hns3_mac));
4251 ret = hns3_get_copper_phy_params(hw, &mac_info);
4255 if (mac_info.link_speed != mac->link_speed) {
4256 ret = hns3_port_shaper_update(hw, mac_info.link_speed);
4261 mac->link_speed = mac_info.link_speed;
4262 mac->link_duplex = mac_info.link_duplex;
4263 mac->link_autoneg = mac_info.link_autoneg;
4264 mac->supported_speed = mac_info.supported_speed;
4265 mac->advertising = mac_info.advertising;
4266 mac->lp_advertising = mac_info.lp_advertising;
4267 mac->support_autoneg = mac_info.support_autoneg;
4273 hns3_update_link_info(struct rte_eth_dev *eth_dev)
4275 struct hns3_adapter *hns = eth_dev->data->dev_private;
4276 struct hns3_hw *hw = &hns->hw;
4279 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER)
4280 ret = hns3_update_copper_link_info(hw);
4281 else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER)
4282 ret = hns3_update_fiber_link_info(hw);
4288 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
4290 struct hns3_config_mac_mode_cmd *req;
4291 struct hns3_cmd_desc desc;
4292 uint32_t loop_en = 0;
4296 req = (struct hns3_config_mac_mode_cmd *)desc.data;
4298 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
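/*
 * 'val' reflects the requested state: Tx/Rx enable, padding, FCS
 * insertion/checking and oversize truncation all follow it, while the
 * 1588 (PTP) and loopback bits are always left cleared here.
 */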
4301 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
4302 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
4303 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
4304 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
4305 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
4306 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
4307 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
4308 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
4309 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
4310 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
4313 * If RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
4314 * when receiving frames. Otherwise, CRC will be stripped.
4316 if (hw->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
4317 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
4319 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
4320 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
4321 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
4322 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
4323 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
4325 ret = hns3_cmd_send(hw, &desc, 1);
4327 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
4333 hns3_get_mac_link_status(struct hns3_hw *hw)
4335 struct hns3_link_status_cmd *req;
4336 struct hns3_cmd_desc desc;
4340 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4341 ret = hns3_cmd_send(hw, &desc, 1);
4343 hns3_err(hw, "get link status cmd failed %d", ret);
4344 return RTE_ETH_LINK_DOWN;
4347 req = (struct hns3_link_status_cmd *)desc.data;
4348 link_status = req->status & HNS3_LINK_STATUS_UP_M;
4350 return !!link_status;
4354 hns3_update_link_status(struct hns3_hw *hw)
4358 state = hns3_get_mac_link_status(hw);
4359 if (state != hw->mac.link_status) {
4360 hw->mac.link_status = state;
4361 hns3_warn(hw, "Link status changed to %s!", state ? "up" : "down");
4369 hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query)
4371 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4372 struct rte_eth_link new_link;
4376 hns3_update_port_link_info(dev);
4378 memset(&new_link, 0, sizeof(new_link));
4379 hns3_setup_linkstatus(dev, &new_link);
4381 ret = rte_eth_linkstatus_set(dev, &new_link);
4382 if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
4383 hns3_start_report_lse(dev);
4387 hns3_service_handler(void *param)
4389 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4390 struct hns3_adapter *hns = eth_dev->data->dev_private;
4391 struct hns3_hw *hw = &hns->hw;
4393 if (!hns3_is_reset_pending(hns))
4394 hns3_update_linkstatus_and_event(hw, true);
4396 hns3_warn(hw, "Cancel the query when reset is pending");
4398 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4402 hns3_init_hardware(struct hns3_adapter *hns)
4404 struct hns3_hw *hw = &hns->hw;
4407 ret = hns3_map_tqp(hw);
4409 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
4413 ret = hns3_init_umv_space(hw);
4415 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
4419 ret = hns3_mac_init(hw);
4421 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
4425 ret = hns3_init_mgr_tbl(hw);
4427 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
4431 ret = hns3_promisc_init(hw);
4433 PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
4438 ret = hns3_init_vlan_config(hns);
4440 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
4444 ret = hns3_dcb_init(hw);
4446 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
4450 ret = hns3_init_fd_config(hns);
4452 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
4456 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
4458 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
4462 ret = hns3_config_gro(hw, false);
4464 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
4469 * During initialization, all hardware mapping relationships between
4470 * queues and interrupt vectors need to be cleared, so that errors
4471 * caused by residual configurations, such as unexpected interrupts,
4472 * can be avoided.
4474 ret = hns3_init_ring_with_vector(hw);
4476 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
4483 hns3_uninit_umv_space(hw);
4488 hns3_clear_hw(struct hns3_hw *hw)
4490 struct hns3_cmd_desc desc;
4493 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4495 ret = hns3_cmd_send(hw, &desc, 1);
4496 if (ret && ret != -EOPNOTSUPP)
4503 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
4508 * New firmware supports reporting more hardware error types in
4509 * MSI-X mode. These errors are defined as RAS errors in hardware
4510 * and belong to a different type from the MSI-X errors processed
4511 * by the network driver.
4513 * The network driver should enable this new error reporting on initialization.
4515 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4516 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4517 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
4521 hns3_set_fiber_default_support_speed(struct hns3_hw *hw)
4523 struct hns3_mac *mac = &hw->mac;
4525 switch (mac->link_speed) {
4526 case RTE_ETH_SPEED_NUM_1G:
4527 return HNS3_FIBER_LINK_SPEED_1G_BIT;
4528 case RTE_ETH_SPEED_NUM_10G:
4529 return HNS3_FIBER_LINK_SPEED_10G_BIT;
4530 case RTE_ETH_SPEED_NUM_25G:
4531 return HNS3_FIBER_LINK_SPEED_25G_BIT;
4532 case RTE_ETH_SPEED_NUM_40G:
4533 return HNS3_FIBER_LINK_SPEED_40G_BIT;
4534 case RTE_ETH_SPEED_NUM_50G:
4535 return HNS3_FIBER_LINK_SPEED_50G_BIT;
4536 case RTE_ETH_SPEED_NUM_100G:
4537 return HNS3_FIBER_LINK_SPEED_100G_BIT;
4538 case RTE_ETH_SPEED_NUM_200G:
4539 return HNS3_FIBER_LINK_SPEED_200G_BIT;
4541 hns3_warn(hw, "invalid speed %u Mbps.", mac->link_speed);
4547 * Validity of supported_speed for fiber and copper media type can be
4548 * guaranteed by the following policy:
4550 * Although the initialization of the phy in the firmware may not be
4551 * completed, the firmware can guarantee that the supported_speed is
4554 * If the firmware version supports the active query of the
4555 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
4556 * through it. If unsupported, use the SFP's speed as the value of the
4560 hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev)
4562 struct hns3_adapter *hns = eth_dev->data->dev_private;
4563 struct hns3_hw *hw = &hns->hw;
4564 struct hns3_mac *mac = &hw->mac;
4567 ret = hns3_update_link_info(eth_dev);
4571 if (mac->media_type == HNS3_MEDIA_TYPE_FIBER) {
4573 * Some firmware does not support reporting supported_speed, and
4574 * only reports the effective speed of the SFP. In this case, it
4575 * is necessary to use the SFP's speed as the supported_speed.
4577 if (mac->supported_speed == 0)
4578 mac->supported_speed =
4579 hns3_set_fiber_default_support_speed(hw);
4586 hns3_get_fc_autoneg_capability(struct hns3_adapter *hns)
4588 struct hns3_mac *mac = &hns->hw.mac;
4590 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
4591 hns->pf.support_fc_autoneg = true;
4596 * Flow control auto-negotiation requires the cooperation of the driver
4597 * and firmware. Currently, the optical port does not support flow
4598 * control auto-negotiation.
4600 hns->pf.support_fc_autoneg = false;
4604 hns3_init_pf(struct rte_eth_dev *eth_dev)
4606 struct rte_device *dev = eth_dev->device;
4607 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4608 struct hns3_adapter *hns = eth_dev->data->dev_private;
4609 struct hns3_hw *hw = &hns->hw;
4612 PMD_INIT_FUNC_TRACE();
4614 /* Get hardware io base address from pcie BAR2 IO space */
4615 hw->io_base = pci_dev->mem_resource[2].addr;
4617 /* Firmware command queue initialize */
4618 ret = hns3_cmd_init_queue(hw);
4620 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
4621 goto err_cmd_init_queue;
4624 hns3_clear_all_event_cause(hw);
4626 /* Firmware command initialize */
4627 ret = hns3_cmd_init(hw);
4629 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
4633 hns3_tx_push_init(eth_dev);
4636 * To ensure that the hardware environment is clean during
4637 * initialization, the driver actively clears the hardware environment
4638 * during initialization, including PF and corresponding VFs' vlan, mac,
4639 * flow table configurations, etc.
4641 ret = hns3_clear_hw(hw);
4643 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
4647 /* Clear the hardware imissed statistics registers. */
4648 ret = hns3_update_imissed_stats(hw, true);
4650 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
4654 hns3_config_all_msix_error(hw, true);
4656 ret = rte_intr_callback_register(pci_dev->intr_handle,
4657 hns3_interrupt_handler,
4660 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
4661 goto err_intr_callback_register;
4664 ret = hns3_ptp_init(hw);
4666 goto err_get_config;
4668 /* Enable interrupt */
4669 rte_intr_enable(pci_dev->intr_handle);
4670 hns3_pf_enable_irq0(hw);
4672 /* Get configuration */
4673 ret = hns3_get_configuration(hw);
4675 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
4676 goto err_get_config;
4679 ret = hns3_tqp_stats_init(hw);
4681 goto err_get_config;
4683 ret = hns3_init_hardware(hns);
4685 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
4689 /* Initialize flow director filter list & hash */
4690 ret = hns3_fdir_filter_init(hns);
4692 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
4696 hns3_rss_set_default_args(hw);
4698 ret = hns3_enable_hw_error_intr(hns, true);
4700 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
4702 goto err_enable_intr;
4705 ret = hns3_get_port_supported_speed(eth_dev);
4707 PMD_INIT_LOG(ERR, "failed to get speed capabilities supported "
4708 "by device, ret = %d.", ret);
4709 goto err_supported_speed;
4712 hns3_get_fc_autoneg_capability(hns);
4714 hns3_tm_conf_init(eth_dev);
4718 err_supported_speed:
4719 (void)hns3_enable_hw_error_intr(hns, false);
4721 hns3_fdir_filter_uninit(hns);
4723 hns3_uninit_umv_space(hw);
4725 hns3_tqp_stats_uninit(hw);
4727 hns3_pf_disable_irq0(hw);
4728 rte_intr_disable(pci_dev->intr_handle);
4729 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4731 err_intr_callback_register:
4733 hns3_cmd_uninit(hw);
4734 hns3_cmd_destroy_queue(hw);
4742 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
4744 struct hns3_adapter *hns = eth_dev->data->dev_private;
4745 struct rte_device *dev = eth_dev->device;
4746 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4747 struct hns3_hw *hw = &hns->hw;
4749 PMD_INIT_FUNC_TRACE();
4751 hns3_tm_conf_uninit(eth_dev);
4752 hns3_enable_hw_error_intr(hns, false);
4753 hns3_rss_uninit(hns);
4754 (void)hns3_config_gro(hw, false);
4755 hns3_promisc_uninit(hw);
4756 hns3_flow_uninit(eth_dev);
4757 hns3_fdir_filter_uninit(hns);
4758 hns3_uninit_umv_space(hw);
4759 hns3_tqp_stats_uninit(hw);
4760 hns3_config_mac_tnl_int(hw, false);
4761 hns3_pf_disable_irq0(hw);
4762 rte_intr_disable(pci_dev->intr_handle);
4763 hns3_intr_unregister(pci_dev->intr_handle, hns3_interrupt_handler,
4765 hns3_config_all_msix_error(hw, false);
4766 hns3_cmd_uninit(hw);
4767 hns3_cmd_destroy_queue(hw);
4772 hns3_convert_link_speeds2bitmap_copper(uint32_t link_speeds)
4776 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4777 case RTE_ETH_LINK_SPEED_10M:
4778 speed_bit = HNS3_PHY_LINK_SPEED_10M_BIT;
4780 case RTE_ETH_LINK_SPEED_10M_HD:
4781 speed_bit = HNS3_PHY_LINK_SPEED_10M_HD_BIT;
4783 case RTE_ETH_LINK_SPEED_100M:
4784 speed_bit = HNS3_PHY_LINK_SPEED_100M_BIT;
4786 case RTE_ETH_LINK_SPEED_100M_HD:
4787 speed_bit = HNS3_PHY_LINK_SPEED_100M_HD_BIT;
4789 case RTE_ETH_LINK_SPEED_1G:
4790 speed_bit = HNS3_PHY_LINK_SPEED_1000M_BIT;
4801 hns3_convert_link_speeds2bitmap_fiber(uint32_t link_speeds)
4805 switch (link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
4806 case RTE_ETH_LINK_SPEED_1G:
4807 speed_bit = HNS3_FIBER_LINK_SPEED_1G_BIT;
4809 case RTE_ETH_LINK_SPEED_10G:
4810 speed_bit = HNS3_FIBER_LINK_SPEED_10G_BIT;
4812 case RTE_ETH_LINK_SPEED_25G:
4813 speed_bit = HNS3_FIBER_LINK_SPEED_25G_BIT;
4815 case RTE_ETH_LINK_SPEED_40G:
4816 speed_bit = HNS3_FIBER_LINK_SPEED_40G_BIT;
4818 case RTE_ETH_LINK_SPEED_50G:
4819 speed_bit = HNS3_FIBER_LINK_SPEED_50G_BIT;
4821 case RTE_ETH_LINK_SPEED_100G:
4822 speed_bit = HNS3_FIBER_LINK_SPEED_100G_BIT;
4824 case RTE_ETH_LINK_SPEED_200G:
4825 speed_bit = HNS3_FIBER_LINK_SPEED_200G_BIT;
4836 hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds)
4838 struct hns3_mac *mac = &hw->mac;
4839 uint32_t supported_speed = mac->supported_speed;
4840 uint32_t speed_bit = 0;
4842 if (mac->media_type == HNS3_MEDIA_TYPE_COPPER)
4843 speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds);
4844 else if (mac->media_type == HNS3_MEDIA_TYPE_FIBER)
4845 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds);
4847 if (!(speed_bit & supported_speed)) {
4848 hns3_err(hw, "link_speeds(0x%x) exceeds the supported speed capability or is incorrect.",
4857 hns3_get_link_speed(uint32_t link_speeds)
4859 uint32_t speed = RTE_ETH_SPEED_NUM_NONE;
4861 if (link_speeds & RTE_ETH_LINK_SPEED_10M ||
4862 link_speeds & RTE_ETH_LINK_SPEED_10M_HD)
4863 speed = RTE_ETH_SPEED_NUM_10M;
4864 if (link_speeds & RTE_ETH_LINK_SPEED_100M ||
4865 link_speeds & RTE_ETH_LINK_SPEED_100M_HD)
4866 speed = RTE_ETH_SPEED_NUM_100M;
4867 if (link_speeds & RTE_ETH_LINK_SPEED_1G)
4868 speed = RTE_ETH_SPEED_NUM_1G;
4869 if (link_speeds & RTE_ETH_LINK_SPEED_10G)
4870 speed = RTE_ETH_SPEED_NUM_10G;
4871 if (link_speeds & RTE_ETH_LINK_SPEED_25G)
4872 speed = RTE_ETH_SPEED_NUM_25G;
4873 if (link_speeds & RTE_ETH_LINK_SPEED_40G)
4874 speed = RTE_ETH_SPEED_NUM_40G;
4875 if (link_speeds & RTE_ETH_LINK_SPEED_50G)
4876 speed = RTE_ETH_SPEED_NUM_50G;
4877 if (link_speeds & RTE_ETH_LINK_SPEED_100G)
4878 speed = RTE_ETH_SPEED_NUM_100G;
4879 if (link_speeds & RTE_ETH_LINK_SPEED_200G)
4880 speed = RTE_ETH_SPEED_NUM_200G;
4886 hns3_get_link_duplex(uint32_t link_speeds)
4888 if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
4889 (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
4890 return RTE_ETH_LINK_HALF_DUPLEX;
4892 return RTE_ETH_LINK_FULL_DUPLEX;
4896 hns3_set_copper_port_link_speed(struct hns3_hw *hw,
4897 struct hns3_set_link_speed_cfg *cfg)
4899 struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM];
4900 struct hns3_phy_params_bd0_cmd *req;
4903 for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) {
4904 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG,
4906 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
4908 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, false);
4909 req = (struct hns3_phy_params_bd0_cmd *)desc[0].data;
4910 req->autoneg = cfg->autoneg;
4913 * The full speed capability is used to negotiate when
4914 * auto-negotiation is enabled.
4917 req->advertising = HNS3_PHY_LINK_SPEED_10M_BIT |
4918 HNS3_PHY_LINK_SPEED_10M_HD_BIT |
4919 HNS3_PHY_LINK_SPEED_100M_BIT |
4920 HNS3_PHY_LINK_SPEED_100M_HD_BIT |
4921 HNS3_PHY_LINK_SPEED_1000M_BIT;
4923 req->speed = cfg->speed;
4924 req->duplex = cfg->duplex;
4927 return hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM);
4931 hns3_set_autoneg(struct hns3_hw *hw, bool enable)
4933 struct hns3_config_auto_neg_cmd *req;
4934 struct hns3_cmd_desc desc;
4938 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_AN_MODE, false);
4940 req = (struct hns3_config_auto_neg_cmd *)desc.data;
4942 hns3_set_bit(flag, HNS3_MAC_CFG_AN_EN_B, 1);
4943 req->cfg_an_cmd_flag = rte_cpu_to_le_32(flag);
4945 ret = hns3_cmd_send(hw, &desc, 1);
4947 hns3_err(hw, "autoneg set cmd failed, ret = %d.", ret);
4953 hns3_set_fiber_port_link_speed(struct hns3_hw *hw,
4954 struct hns3_set_link_speed_cfg *cfg)
4958 if (hw->mac.support_autoneg) {
4959 ret = hns3_set_autoneg(hw, cfg->autoneg);
4961 hns3_err(hw, "failed to configure auto-negotiation.");
4966 * To enable auto-negotiation, we only need to turn on the auto-negotiation
4967 * switch; the firmware then sets all speed capabilities.
4975 * Some hardware doesn't support auto-negotiation, but users may not
4976 * configure link_speeds (default 0), which means auto-negotiation.
4977 * In this case, a warning message needs to be printed instead of returning an error.
4981 hns3_warn(hw, "auto-negotiation is not supported, use default fixed speed!");
4985 return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex);
4989 hns3_set_port_link_speed(struct hns3_hw *hw,
4990 struct hns3_set_link_speed_cfg *cfg)
4994 if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) {
4995 #if defined(RTE_HNS3_ONLY_1630_FPGA)
4996 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5001 ret = hns3_set_copper_port_link_speed(hw, cfg);
5003 hns3_err(hw, "failed to set copper port link speed,"
5007 } else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) {
5008 ret = hns3_set_fiber_port_link_speed(hw, cfg);
5010 hns3_err(hw, "failed to set fiber port link speed,"
5020 hns3_apply_link_speed(struct hns3_hw *hw)
5022 struct rte_eth_conf *conf = &hw->data->dev_conf;
5023 struct hns3_set_link_speed_cfg cfg;
5025 memset(&cfg, 0, sizeof(struct hns3_set_link_speed_cfg));
5026 cfg.autoneg = (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) ?
5027 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
5028 if (cfg.autoneg != RTE_ETH_LINK_AUTONEG) {
5029 cfg.speed = hns3_get_link_speed(conf->link_speeds);
5030 cfg.duplex = hns3_get_link_duplex(conf->link_speeds);
5033 return hns3_set_port_link_speed(hw, &cfg);
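/*
 * Illustrative application-side sketch (not part of the driver): requesting a
 * fixed 25G full-duplex link so that hns3_apply_link_speed() takes the
 * fixed-speed path above instead of auto-negotiation. The port id, queue
 * counts and the chosen speed are assumptions for the example only.
 *
 *    struct rte_eth_conf conf;
 *
 *    memset(&conf, 0, sizeof(conf));
 *    conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_25G;
 *    if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) == 0)
 *            (void)rte_eth_dev_start(port_id);
 */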
5037 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
5039 struct hns3_hw *hw = &hns->hw;
5043 ret = hns3_update_queue_map_configure(hns);
5045 hns3_err(hw, "failed to update queue mapping configuration, ret = %d",
5050 /* Note: hns3_tm_conf_update must be called after configuring DCB. */
5051 ret = hns3_tm_conf_update(hw);
5053 PMD_INIT_LOG(ERR, "failed to update tm conf, ret = %d.", ret);
5057 hns3_enable_rxd_adv_layout(hw);
5059 ret = hns3_init_queues(hns, reset_queue);
5061 PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
5065 link_en = hw->set_link_down ? false : true;
5066 ret = hns3_cfg_mac_mode(hw, link_en);
5068 PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
5069 goto err_config_mac_mode;
5072 ret = hns3_apply_link_speed(hw);
5074 goto err_set_link_speed;
5079 (void)hns3_cfg_mac_mode(hw, false);
5081 err_config_mac_mode:
5082 hns3_dev_release_mbufs(hns);
5084 * This is exception handling; hns3_reset_all_tqps will log the
5085 * corresponding error message if it fails, so it is
5086 * not necessary to check its return value. Keep
5087 * ret as the error code that caused the exception.
5089 (void)hns3_reset_all_tqps(hns);
5094 hns3_restore_filter(struct rte_eth_dev *dev)
5096 hns3_restore_rss_filter(dev);
5100 hns3_dev_start(struct rte_eth_dev *dev)
5102 struct hns3_adapter *hns = dev->data->dev_private;
5103 struct hns3_hw *hw = &hns->hw;
5104 bool old_state = hw->set_link_down;
5107 PMD_INIT_FUNC_TRACE();
5108 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
5111 rte_spinlock_lock(&hw->lock);
5112 hw->adapter_state = HNS3_NIC_STARTING;
5115 * If the dev_set_link_down() API has been called, the "set_link_down"
5116 * flag is cleared by the dev_start() API. In addition, the flag should
5117 * also be cleared before calling hns3_do_start() so that the MAC can be
5118 * enabled in the dev_start stage.
5120 hw->set_link_down = false;
5121 ret = hns3_do_start(hns, true);
5125 ret = hns3_map_rx_interrupt(dev);
5127 goto map_rx_inter_err;
5130 * There are three registers used to control the status of a TQP
5131 * (which contains a pair of Tx and Rx queues) in the new version network
5132 * engine. One controls the enabling of the Tx queue, another
5133 * controls the enabling of the Rx queue, and the last is the master
5134 * switch controlling the enabling of the TQP. The Tx register and the
5135 * TQP register must both be enabled to enable a Tx queue.
5136 * The same applies to the Rx queue. For the older network engine, this
5137 * function only refreshes the enabled flag, which is used to update the
5138 * queue status in the DPDK framework.
5140 ret = hns3_start_all_txqs(dev);
5142 goto map_rx_inter_err;
5144 ret = hns3_start_all_rxqs(dev);
5146 goto start_all_rxqs_fail;
5148 hw->adapter_state = HNS3_NIC_STARTED;
5149 rte_spinlock_unlock(&hw->lock);
5151 hns3_rx_scattered_calc(dev);
5152 hns3_set_rxtx_function(dev);
5153 hns3_mp_req_start_rxtx(dev);
5155 hns3_restore_filter(dev);
5157 /* Enable interrupt of all rx queues before enabling queues */
5158 hns3_dev_all_rx_queue_intr_enable(hw, true);
5161 * After the initialization is finished, enable the TQPs to receive/transmit
5162 * packets and refresh all queue statuses.
5164 hns3_start_tqps(hw);
5166 hns3_tm_dev_start_proc(hw);
5168 if (dev->data->dev_conf.intr_conf.lsc != 0)
5169 hns3_dev_link_update(dev, 0);
5170 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
5172 hns3_info(hw, "hns3 dev start successful!");
5176 start_all_rxqs_fail:
5177 hns3_stop_all_txqs(dev);
5179 (void)hns3_do_stop(hns);
5181 hw->set_link_down = old_state;
5182 hw->adapter_state = HNS3_NIC_CONFIGURED;
5183 rte_spinlock_unlock(&hw->lock);
5189 hns3_do_stop(struct hns3_adapter *hns)
5191 struct hns3_hw *hw = &hns->hw;
5195 * The "hns3_do_stop" function will also be called by .stop_service to
5196 * prepare a reset. At the time of a global or IMP reset, the command cannot
5197 * be sent to stop the Tx/Rx queues. The mbufs in the Tx/Rx queues may be
5198 * accessed during the reset process, so the mbufs cannot be released
5199 * during reset and must be released after the reset is completed.
5202 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
5203 hns3_dev_release_mbufs(hns);
5205 ret = hns3_cfg_mac_mode(hw, false);
5208 hw->mac.link_status = RTE_ETH_LINK_DOWN;
5210 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
5211 hns3_configure_all_mac_addr(hns, true);
5212 ret = hns3_reset_all_tqps(hns);
5214 hns3_err(hw, "failed to reset all queues ret = %d.",
5224 hns3_dev_stop(struct rte_eth_dev *dev)
5226 struct hns3_adapter *hns = dev->data->dev_private;
5227 struct hns3_hw *hw = &hns->hw;
5229 PMD_INIT_FUNC_TRACE();
5230 dev->data->dev_started = 0;
5232 hw->adapter_state = HNS3_NIC_STOPPING;
5233 hns3_set_rxtx_function(dev);
5235 /* Disable datapath on secondary process. */
5236 hns3_mp_req_stop_rxtx(dev);
5237 /* Prevent crashes when queues are still in use. */
5238 rte_delay_ms(hw->cfg_max_queues);
5240 rte_spinlock_lock(&hw->lock);
5241 if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
5242 hns3_tm_dev_stop_proc(hw);
5243 hns3_config_mac_tnl_int(hw, false);
5246 hns3_unmap_rx_interrupt(dev);
5247 hw->adapter_state = HNS3_NIC_CONFIGURED;
5249 hns3_rx_scattered_reset(dev);
5250 rte_eal_alarm_cancel(hns3_service_handler, dev);
5251 hns3_stop_report_lse(dev);
5252 rte_spinlock_unlock(&hw->lock);
5258 hns3_dev_close(struct rte_eth_dev *eth_dev)
5260 struct hns3_adapter *hns = eth_dev->data->dev_private;
5261 struct hns3_hw *hw = &hns->hw;
5264 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
5265 hns3_mp_uninit(eth_dev);
5269 if (hw->adapter_state == HNS3_NIC_STARTED)
5270 ret = hns3_dev_stop(eth_dev);
5272 hw->adapter_state = HNS3_NIC_CLOSING;
5273 hns3_reset_abort(hns);
5274 hw->adapter_state = HNS3_NIC_CLOSED;
5276 hns3_configure_all_mc_mac_addr(hns, true);
5277 hns3_remove_all_vlan_table(hns);
5278 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
5279 hns3_uninit_pf(eth_dev);
5280 hns3_free_all_queues(eth_dev);
5281 rte_free(hw->reset.wait_data);
5282 hns3_mp_uninit(eth_dev);
5283 hns3_warn(hw, "Close port %u finished", hw->data->port_id);
5289 hns3_get_autoneg_rxtx_pause_copper(struct hns3_hw *hw, bool *rx_pause,
5292 struct hns3_mac *mac = &hw->mac;
5293 uint32_t advertising = mac->advertising;
5294 uint32_t lp_advertising = mac->lp_advertising;
5298 if (advertising & lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) {
5301 } else if (advertising & lp_advertising &
5302 HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT) {
5303 if (advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5305 else if (lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT)
5310 static enum hns3_fc_mode
5311 hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
5313 enum hns3_fc_mode current_mode;
5314 bool rx_pause = false;
5315 bool tx_pause = false;
5317 switch (hw->mac.media_type) {
5318 case HNS3_MEDIA_TYPE_COPPER:
5319 hns3_get_autoneg_rxtx_pause_copper(hw, &rx_pause, &tx_pause);
5323 * Flow control auto-negotiation is not supported for fiber and
5324 * backplane media types.
5326 case HNS3_MEDIA_TYPE_FIBER:
5327 case HNS3_MEDIA_TYPE_BACKPLANE:
5328 hns3_err(hw, "autoneg FC mode can't be obtained, but flow control auto-negotiation is enabled.");
5329 current_mode = hw->requested_fc_mode;
5332 hns3_err(hw, "autoneg FC mode can't be obtained for unknown media type(%u).",
5333 hw->mac.media_type);
5334 current_mode = HNS3_FC_NONE;
5338 if (rx_pause && tx_pause)
5339 current_mode = HNS3_FC_FULL;
5341 current_mode = HNS3_FC_RX_PAUSE;
5343 current_mode = HNS3_FC_TX_PAUSE;
5345 current_mode = HNS3_FC_NONE;
5348 return current_mode;
5351 static enum hns3_fc_mode
5352 hns3_get_current_fc_mode(struct rte_eth_dev *dev)
5354 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5355 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5356 struct hns3_mac *mac = &hw->mac;
5359 * When the flow control mode is obtained, the device may not have completed
5360 * auto-negotiation. It is necessary to wait for link establishment.
5362 (void)hns3_dev_link_update(dev, 1);
5365 * If the link auto-negotiation of the NIC is disabled, or the flow
5366 * control auto-negotiation is not supported, the forced flow control mode is used.
5369 if (mac->link_autoneg == 0 || !pf->support_fc_autoneg)
5370 return hw->requested_fc_mode;
5372 return hns3_get_autoneg_fc_mode(hw);
5376 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5378 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5379 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5380 enum hns3_fc_mode current_mode;
5382 current_mode = hns3_get_current_fc_mode(dev);
5383 switch (current_mode) {
5385 fc_conf->mode = RTE_ETH_FC_FULL;
5387 case HNS3_FC_TX_PAUSE:
5388 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
5390 case HNS3_FC_RX_PAUSE:
5391 fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
5395 fc_conf->mode = RTE_ETH_FC_NONE;
5399 fc_conf->pause_time = pf->pause_time;
5400 fc_conf->autoneg = pf->support_fc_autoneg ? hw->mac.link_autoneg : 0;
5406 hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg)
5408 struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
5410 if (!pf->support_fc_autoneg) {
5412 hns3_err(hw, "unsupported fc auto-negotiation setting.");
5417 * Flow control auto-negotiation of the NIC is not supported,
5418 * but other auto-negotiation features may be supported.
5420 if (autoneg != hw->mac.link_autoneg) {
5421 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!");
5429 * If flow control auto-negotiation of the NIC is supported, all
5430 * auto-negotiation features are supported.
5432 if (autoneg != hw->mac.link_autoneg) {
5433 hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to change autoneg!");
5441 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5443 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5444 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5447 if (fc_conf->high_water || fc_conf->low_water ||
5448 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
5449 hns3_err(hw, "Unsupported flow control settings specified, "
5450 "high_water(%u), low_water(%u), send_xon(%u) and "
5451 "mac_ctrl_frame_fwd(%u) must be set to '0'",
5452 fc_conf->high_water, fc_conf->low_water,
5453 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
5457 ret = hns3_check_fc_autoneg_valid(hw, fc_conf->autoneg);
5461 if (!fc_conf->pause_time) {
5462 hns3_err(hw, "Invalid pause time %u setting.",
5463 fc_conf->pause_time);
5467 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5468 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
5469 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
5470 "current_fc_status = %d", hw->current_fc_status);
5474 if (hw->num_tc > 1 && !pf->support_multi_tc_pause) {
5475 hns3_err(hw, "in multi-TC scenarios, MAC pause is not supported.");
5479 rte_spinlock_lock(&hw->lock);
5480 ret = hns3_fc_enable(dev, fc_conf);
5481 rte_spinlock_unlock(&hw->lock);
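/*
 * Illustrative application-side sketch (not part of the driver): enabling MAC
 * pause in both directions through the ethdev API while honouring the checks
 * in hns3_flow_ctrl_set() above (high_water, low_water, send_xon and
 * mac_ctrl_frame_fwd must be 0 and pause_time must be non-zero). The port id
 * and pause time are assumptions for the example only.
 *
 *    struct rte_eth_fc_conf fc_conf;
 *
 *    memset(&fc_conf, 0, sizeof(fc_conf));
 *    fc_conf.mode = RTE_ETH_FC_FULL;
 *    fc_conf.pause_time = 0xFFFF;
 *    fc_conf.autoneg = 0;
 *    (void)rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */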
5487 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
5488 struct rte_eth_pfc_conf *pfc_conf)
5490 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5493 if (!hns3_dev_get_support(hw, DCB)) {
5494 hns3_err(hw, "This port does not support dcb configurations.");
5498 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
5499 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
5500 hns3_err(hw, "Unsupported flow control settings specified, "
5501 "high_water(%u), low_water(%u), send_xon(%u) and "
5502 "mac_ctrl_frame_fwd(%u) must be set to '0'",
5503 pfc_conf->fc.high_water, pfc_conf->fc.low_water,
5504 pfc_conf->fc.send_xon,
5505 pfc_conf->fc.mac_ctrl_frame_fwd);
5508 if (pfc_conf->fc.autoneg) {
5509 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
5512 if (pfc_conf->fc.pause_time == 0) {
5513 hns3_err(hw, "Invalid pause time %u setting.",
5514 pfc_conf->fc.pause_time);
5518 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
5519 hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
5520 hns3_err(hw, "MAC pause is enabled. Cannot set PFC. "
5521 "current_fc_status = %d", hw->current_fc_status);
5525 rte_spinlock_lock(&hw->lock);
5526 ret = hns3_dcb_pfc_enable(dev, pfc_conf);
5527 rte_spinlock_unlock(&hw->lock);
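/*
 * Illustrative application-side sketch (not part of the driver): enabling
 * priority flow control for one priority. As checked above, the port must
 * support DCB, fc.autoneg must be 0 and fc.pause_time must be non-zero. The
 * port id, priority and pause time are assumptions for the example only.
 *
 *    struct rte_eth_pfc_conf pfc_conf;
 *
 *    memset(&pfc_conf, 0, sizeof(pfc_conf));
 *    pfc_conf.fc.mode = RTE_ETH_FC_FULL;
 *    pfc_conf.fc.pause_time = 0xFFFF;
 *    pfc_conf.priority = 0;
 *    (void)rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
 */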
5533 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
5535 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5536 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5537 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
5540 rte_spinlock_lock(&hw->lock);
5541 if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
5542 dcb_info->nb_tcs = pf->local_max_tc;
5544 dcb_info->nb_tcs = 1;
5546 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
5547 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
5548 for (i = 0; i < dcb_info->nb_tcs; i++)
5549 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
5551 for (i = 0; i < hw->num_tc; i++) {
5552 dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
5553 dcb_info->tc_queue.tc_txq[0][i].base =
5554 hw->tc_queue[i].tqp_offset;
5555 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
5556 dcb_info->tc_queue.tc_txq[0][i].nb_queue =
5557 hw->tc_queue[i].tqp_count;
5559 rte_spinlock_unlock(&hw->lock);
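/*
 * Illustrative application-side sketch (not part of the driver): reading back
 * the TC-to-queue mapping that hns3_get_dcb_info() fills in. The port id is
 * an assumption for the example only.
 *
 *    struct rte_eth_dcb_info dcb_info;
 *    int i;
 *
 *    if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) == 0)
 *            for (i = 0; i < dcb_info.nb_tcs; i++)
 *                    printf("TC%d: rxq base %u, nb %u\n", i,
 *                           dcb_info.tc_queue.tc_rxq[0][i].base,
 *                           dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
 */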
5565 hns3_reinit_dev(struct hns3_adapter *hns)
5567 struct hns3_hw *hw = &hns->hw;
5570 ret = hns3_cmd_init(hw);
5572 hns3_err(hw, "Failed to init cmd: %d", ret);
5576 ret = hns3_reset_all_tqps(hns);
5578 hns3_err(hw, "Failed to reset all queues: %d", ret);
5582 ret = hns3_init_hardware(hns);
5584 hns3_err(hw, "Failed to init hardware: %d", ret);
5588 ret = hns3_enable_hw_error_intr(hns, true);
5590 hns3_err(hw, "fail to enable hw error interrupts: %d",
5594 hns3_info(hw, "Reset done, driver initialization finished.");
5600 is_pf_reset_done(struct hns3_hw *hw)
5602 uint32_t val, reg, reg_bit;
5604 switch (hw->reset.level) {
5605 case HNS3_IMP_RESET:
5606 reg = HNS3_GLOBAL_RESET_REG;
5607 reg_bit = HNS3_IMP_RESET_BIT;
5609 case HNS3_GLOBAL_RESET:
5610 reg = HNS3_GLOBAL_RESET_REG;
5611 reg_bit = HNS3_GLOBAL_RESET_BIT;
5613 case HNS3_FUNC_RESET:
5614 reg = HNS3_FUN_RST_ING;
5615 reg_bit = HNS3_FUN_RST_ING_B;
5617 case HNS3_FLR_RESET:
5619 hns3_err(hw, "Wait for unsupported reset level: %d",
5623 val = hns3_read_dev(hw, reg);
5624 if (hns3_get_bit(val, reg_bit))
5631 hns3_is_reset_pending(struct hns3_adapter *hns)
5633 struct hns3_hw *hw = &hns->hw;
5634 enum hns3_reset_level reset;
5636 hns3_check_event_cause(hns, NULL);
5637 reset = hns3_get_reset_level(hns, &hw->reset.pending);
5638 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5639 hw->reset.level < reset) {
5640 hns3_warn(hw, "High level reset %d is pending", reset);
5643 reset = hns3_get_reset_level(hns, &hw->reset.request);
5644 if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET &&
5645 hw->reset.level < reset) {
5646 hns3_warn(hw, "High level reset %d is requested", reset);
5653 hns3_wait_hardware_ready(struct hns3_adapter *hns)
5655 struct hns3_hw *hw = &hns->hw;
5656 struct hns3_wait_data *wait_data = hw->reset.wait_data;
5659 if (wait_data->result == HNS3_WAIT_SUCCESS)
5661 else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
5662 hns3_clock_gettime(&tv);
5663 hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
5664 tv.tv_sec, tv.tv_usec);
5666 } else if (wait_data->result == HNS3_WAIT_REQUEST)
5669 wait_data->hns = hns;
5670 wait_data->check_completion = is_pf_reset_done;
5671 wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
5672 HNS3_RESET_WAIT_MS + hns3_clock_gettime_ms();
5673 wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
5674 wait_data->count = HNS3_RESET_WAIT_CNT;
5675 wait_data->result = HNS3_WAIT_REQUEST;
5676 rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
5681 hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
5683 struct hns3_cmd_desc desc;
5684 struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;
5686 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
5687 hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
5688 req->fun_reset_vfid = func_id;
5690 return hns3_cmd_send(hw, &desc, 1);
5694 hns3_imp_reset_cmd(struct hns3_hw *hw)
5696 struct hns3_cmd_desc desc;
5698 hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
5699 desc.data[0] = 0xeedd;
5701 return hns3_cmd_send(hw, &desc, 1);
5705 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
5707 struct hns3_hw *hw = &hns->hw;
5711 hns3_clock_gettime(&tv);
5712 if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
5713 hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
5714 hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
5715 tv.tv_sec, tv.tv_usec);
5719 switch (reset_level) {
5720 case HNS3_IMP_RESET:
5721 hns3_imp_reset_cmd(hw);
5722 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
5723 tv.tv_sec, tv.tv_usec);
5725 case HNS3_GLOBAL_RESET:
5726 val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
5727 hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
5728 hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
5729 hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
5730 tv.tv_sec, tv.tv_usec);
5732 case HNS3_FUNC_RESET:
5733 hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
5734 tv.tv_sec, tv.tv_usec);
5735 /* schedule again to check later */
5736 hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
5737 hns3_schedule_reset(hns);
5740 hns3_warn(hw, "Unsupported reset level: %d", reset_level);
5743 hns3_atomic_clear_bit(reset_level, &hw->reset.request);
5746 static enum hns3_reset_level
5747 hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
5749 struct hns3_hw *hw = &hns->hw;
5750 enum hns3_reset_level reset_level = HNS3_NONE_RESET;
5752 /* Return the highest priority reset level amongst all */
5753 if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
5754 reset_level = HNS3_IMP_RESET;
5755 else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
5756 reset_level = HNS3_GLOBAL_RESET;
5757 else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
5758 reset_level = HNS3_FUNC_RESET;
5759 else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
5760 reset_level = HNS3_FLR_RESET;
5762 if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
5763 return HNS3_NONE_RESET;
5769 hns3_record_imp_error(struct hns3_adapter *hns)
5771 struct hns3_hw *hw = &hns->hw;
5774 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5775 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
5776 hns3_warn(hw, "Detected IMP RD poison!");
5777 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
5778 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
5781 if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
5782 hns3_warn(hw, "Detected IMP CMDQ error!");
5783 hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
5784 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
5789 hns3_prepare_reset(struct hns3_adapter *hns)
5791 struct hns3_hw *hw = &hns->hw;
5795 switch (hw->reset.level) {
5796 case HNS3_FUNC_RESET:
5797 ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
5802 * After performing a PF reset, it is not necessary to do any
5803 * mailbox handling or send any command to the firmware, because
5804 * any mailbox handling or command to the firmware is only valid
5805 * after hns3_cmd_init is called.
5807 __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
5808 hw->reset.stats.request_cnt++;
5810 case HNS3_IMP_RESET:
5811 hns3_record_imp_error(hns);
5812 reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
5813 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
5814 BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
5823 hns3_set_rst_done(struct hns3_hw *hw)
5825 struct hns3_pf_rst_done_cmd *req;
5826 struct hns3_cmd_desc desc;
5828 req = (struct hns3_pf_rst_done_cmd *)desc.data;
5829 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
5830 req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
5831 return hns3_cmd_send(hw, &desc, 1);
5835 hns3_stop_service(struct hns3_adapter *hns)
5837 struct hns3_hw *hw = &hns->hw;
5838 struct rte_eth_dev *eth_dev;
5840 eth_dev = &rte_eth_devices[hw->data->port_id];
5841 hw->mac.link_status = RTE_ETH_LINK_DOWN;
5842 if (hw->adapter_state == HNS3_NIC_STARTED) {
5843 rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
5844 hns3_update_linkstatus_and_event(hw, false);
5847 hns3_set_rxtx_function(eth_dev);
5849 /* Disable datapath on secondary process. */
5850 hns3_mp_req_stop_rxtx(eth_dev);
5851 rte_delay_ms(hw->cfg_max_queues);
5853 rte_spinlock_lock(&hw->lock);
5854 if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
5855 hw->adapter_state == HNS3_NIC_STOPPING) {
5856 hns3_enable_all_queues(hw, false);
5858 hw->reset.mbuf_deferred_free = true;
5860 hw->reset.mbuf_deferred_free = false;
5863 * It is cumbersome for hardware to pick-and-choose entries for deletion
5864 * from table space. Hence, for a function reset, software intervention is
5865 * required to delete the entries.
5867 if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
5868 hns3_configure_all_mc_mac_addr(hns, true);
5869 rte_spinlock_unlock(&hw->lock);
5875 hns3_start_service(struct hns3_adapter *hns)
5877 struct hns3_hw *hw = &hns->hw;
5878 struct rte_eth_dev *eth_dev;
5880 if (hw->reset.level == HNS3_IMP_RESET ||
5881 hw->reset.level == HNS3_GLOBAL_RESET)
5882 hns3_set_rst_done(hw);
5883 eth_dev = &rte_eth_devices[hw->data->port_id];
5884 hns3_set_rxtx_function(eth_dev);
5885 hns3_mp_req_start_rxtx(eth_dev);
5886 if (hw->adapter_state == HNS3_NIC_STARTED) {
5888 * The parent function of this API already holds hns3_hw.lock.
5889 * hns3_service_handler may report LSE; in a bonding application
5890 * it will call the driver's ops, which may acquire hns3_hw.lock
5891 * again and thus lead to a deadlock.
5892 * We defer the call to hns3_service_handler to avoid the deadlock.
5894 rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
5895 hns3_service_handler, eth_dev);
5897 /* Enable interrupt of all rx queues before enabling queues */
5898 hns3_dev_all_rx_queue_intr_enable(hw, true);
5900 * The enable state of each Rx and Tx queue will be recovered after
5901 * reset, so we need to restore it before enabling all TQPs.
5903 hns3_restore_tqp_enable_state(hw);
5905 * When the initialization is finished, enable the queues to receive
5906 * and transmit packets.
5908 hns3_enable_all_queues(hw, true);
5915 hns3_restore_conf(struct hns3_adapter *hns)
5917 struct hns3_hw *hw = &hns->hw;
5920 ret = hns3_configure_all_mac_addr(hns, false);
5924 ret = hns3_configure_all_mc_mac_addr(hns, false);
5928 ret = hns3_dev_promisc_restore(hns);
5932 ret = hns3_restore_vlan_table(hns);
5936 ret = hns3_restore_vlan_conf(hns);
5940 ret = hns3_restore_all_fdir_filter(hns);
5944 ret = hns3_restore_ptp(hns);
5948 ret = hns3_restore_rx_interrupt(hw);
5952 ret = hns3_restore_gro_conf(hw);
5956 ret = hns3_restore_fec(hw);
5960 if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
5961 ret = hns3_do_start(hns, false);
5964 hns3_info(hw, "hns3 dev restart successful!");
5965 } else if (hw->adapter_state == HNS3_NIC_STOPPING)
5966 hw->adapter_state = HNS3_NIC_CONFIGURED;
5970 hns3_configure_all_mc_mac_addr(hns, true);
5972 hns3_configure_all_mac_addr(hns, true);
5977 hns3_reset_service(void *param)
5979 struct hns3_adapter *hns = (struct hns3_adapter *)param;
5980 struct hns3_hw *hw = &hns->hw;
5981 enum hns3_reset_level reset_level;
5982 struct timeval tv_delta;
5983 struct timeval tv_start;
5989 * The interrupt is not triggered within the delay time.
5990 * The interrupt may have been lost. It is necessary to handle
5991 * the interrupt to recover from the error.
5993 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
5994 SCHEDULE_DEFERRED) {
5995 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
5997 hns3_err(hw, "Handling interrupts in delayed tasks");
5998 hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
5999 reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
6000 if (reset_level == HNS3_NONE_RESET) {
6001 hns3_err(hw, "No reset level is set, try IMP reset");
6002 hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
6005 __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
6008 * Check if there is any ongoing reset in the hardware. This status can
6009 * be checked from reset_pending. If there is, we need to wait for the
6010 * hardware to complete the reset.
6011 * a. If we are able to figure out in a reasonable time that the hardware
6012 * has fully reset, we can proceed with the driver/client
6014 * b. else, we can come back later to check this status and re-schedule
6017 reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
6018 if (reset_level != HNS3_NONE_RESET) {
6019 hns3_clock_gettime(&tv_start);
6020 ret = hns3_reset_process(hns, reset_level);
6021 hns3_clock_gettime(&tv);
6022 timersub(&tv, &tv_start, &tv_delta);
6023 msec = hns3_clock_calctime_ms(&tv_delta);
6024 if (msec > HNS3_RESET_PROCESS_MS)
6025 hns3_err(hw, "%d handle long time delta %" PRIu64
6026 " ms time=%ld.%.6ld",
6027 hw->reset.level, msec,
6028 tv.tv_sec, tv.tv_usec);
6033 /* Check if we got any *new* reset requests to be honored */
6034 reset_level = hns3_get_reset_level(hns, &hw->reset.request);
6035 if (reset_level != HNS3_NONE_RESET)
6036 hns3_msix_process(hns, reset_level);
6040 hns3_get_speed_capa_num(uint16_t device_id)
6044 switch (device_id) {
6045 case HNS3_DEV_ID_25GE:
6046 case HNS3_DEV_ID_25GE_RDMA:
6049 case HNS3_DEV_ID_100G_RDMA_MACSEC:
6050 case HNS3_DEV_ID_200G_RDMA:
6062 hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
6065 switch (device_id) {
6066 case HNS3_DEV_ID_25GE:
6068 case HNS3_DEV_ID_25GE_RDMA:
6069 speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
6070 speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
6072 /* In HNS3 device, the 25G NIC is compatible with 10G rate */
6073 speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
6074 speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
6076 case HNS3_DEV_ID_100G_RDMA_MACSEC:
6077 speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
6078 speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
6080 case HNS3_DEV_ID_200G_RDMA:
6081 speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
6082 speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
6092 hns3_fec_get_capability(struct rte_eth_dev *dev,
6093 struct rte_eth_fec_capa *speed_fec_capa,
6096 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6097 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6098 uint16_t device_id = pci_dev->id.device_id;
6099 unsigned int capa_num;
6102 capa_num = hns3_get_speed_capa_num(device_id);
6103 if (capa_num == 0) {
6104 hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
6109 if (speed_fec_capa == NULL || num < capa_num)
6112 ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
6120 get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
6122 struct hns3_config_fec_cmd *req;
6123 struct hns3_cmd_desc desc;
6127 * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported
6128 * in devices with a link speed below 10 Gbps.
6131 if (hw->mac.link_speed < RTE_ETH_SPEED_NUM_10G) {
6136 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
6137 req = (struct hns3_config_fec_cmd *)desc.data;
6138 ret = hns3_cmd_send(hw, &desc, 1);
6140 hns3_err(hw, "get current fec auto state failed, ret = %d",
6145 *state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
6150 hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
6152 struct hns3_sfp_info_cmd *resp;
6153 uint32_t tmp_fec_capa;
6155 struct hns3_cmd_desc desc;
6159 * If the link is down and AUTO is enabled, AUTO is returned; otherwise,
6160 * the configured FEC mode is returned.
6161 * If the link is up, the current FEC mode is returned.
6163 if (hw->mac.link_status == RTE_ETH_LINK_DOWN) {
6164 ret = get_current_fec_auto_state(hw, &auto_state);
6168 if (auto_state == 0x1) {
6169 *fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
6174 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
6175 resp = (struct hns3_sfp_info_cmd *)desc.data;
6176 resp->query_type = HNS3_ACTIVE_QUERY;
6178 ret = hns3_cmd_send(hw, &desc, 1);
6179 if (ret == -EOPNOTSUPP) {
6180 hns3_err(hw, "IMP does not support getting FEC, ret = %d", ret);
6183 hns3_err(hw, "get FEC failed, ret = %d", ret);
6188 * FEC mode order defined in hns3 hardware is inconsistent with
6189 * that defined in the ethdev library, so the sequence needs to be converted.
6192 switch (resp->active_fec) {
6193 case HNS3_HW_FEC_MODE_NOFEC:
6194 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6196 case HNS3_HW_FEC_MODE_BASER:
6197 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
6199 case HNS3_HW_FEC_MODE_RS:
6200 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
6203 tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
6207 *fec_capa = tmp_fec_capa;
6212 hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
6214 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6216 return hns3_fec_get_internal(hw, fec_capa);
6220 hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
6222 struct hns3_config_fec_cmd *req;
6223 struct hns3_cmd_desc desc;
6226 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
6228 req = (struct hns3_config_fec_cmd *)desc.data;
6230 case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
6231 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6232 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
6234 case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
6235 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6236 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
6238 case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
6239 hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
6240 HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
6242 case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
6243 hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
6248 ret = hns3_cmd_send(hw, &desc, 1);
6250 hns3_err(hw, "set fec mode failed, ret = %d", ret);
6256 get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
6258 struct hns3_mac *mac = &hw->mac;
6261 switch (mac->link_speed) {
6262 case RTE_ETH_SPEED_NUM_10G:
6263 cur_capa = fec_capa[1].capa;
6265 case RTE_ETH_SPEED_NUM_25G:
6266 case RTE_ETH_SPEED_NUM_100G:
6267 case RTE_ETH_SPEED_NUM_200G:
6268 cur_capa = fec_capa[0].capa;
6279 is_fec_mode_one_bit_set(uint32_t mode)
6284 for (i = 0; i < sizeof(mode) * 8; i++) /* walk every bit of the 32-bit mode */
6285 if (mode >> i & 0x1)
6288 return cnt == 1 ? true : false;
6292 hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
6294 #define FEC_CAPA_NUM 2
6295 struct hns3_adapter *hns = dev->data->dev_private;
6296 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6297 struct hns3_pf *pf = &hns->pf;
6299 struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
6301 uint32_t num = FEC_CAPA_NUM;
6304 ret = hns3_fec_get_capability(dev, fec_capa, num);
6308 /* HNS3 PMD only supports modes with a single bit set, e.g. 0x1, 0x4 */
6309 if (!is_fec_mode_one_bit_set(mode)) {
6310 hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
6311 "FEC mode should have only one bit set", mode);
6316 * Check whether the configured mode is within the FEC capability.
6317 * If not, the configured mode will not be supported.
6319 cur_capa = get_current_speed_fec_cap(hw, fec_capa);
6320 if (!(cur_capa & mode)) {
6321 hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
6325 rte_spinlock_lock(&hw->lock);
6326 ret = hns3_set_fec_hw(hw, mode);
6328 rte_spinlock_unlock(&hw->lock);
6332 pf->fec_mode = mode;
6333 rte_spinlock_unlock(&hw->lock);
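/*
 * Illustrative application-side sketch (not part of the driver): querying the
 * FEC capability and then selecting RS FEC, which satisfies the
 * single-bit-mode rule enforced by hns3_fec_set() above. The port id is an
 * assumption for the example only.
 *
 *    struct rte_eth_fec_capa capa[2];
 *    uint32_t cur_mode;
 *
 *    if (rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa)) > 0 &&
 *        rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS)) == 0)
 *            (void)rte_eth_fec_get(port_id, &cur_mode);
 */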
6339 hns3_restore_fec(struct hns3_hw *hw)
6341 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6342 struct hns3_pf *pf = &hns->pf;
6343 uint32_t mode = pf->fec_mode;
6346 ret = hns3_set_fec_hw(hw, mode);
6348 hns3_err(hw, "restore fec mode(0x%x) failed, ret = %d",
6355 hns3_query_dev_fec_info(struct hns3_hw *hw)
6357 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
6358 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(hns);
6361 ret = hns3_fec_get_internal(hw, &pf->fec_mode);
6363 hns3_err(hw, "query device FEC info failed, ret = %d", ret);
6369 hns3_optical_module_existed(struct hns3_hw *hw)
6371 struct hns3_cmd_desc desc;
6375 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true);
6376 ret = hns3_cmd_send(hw, &desc, 1);
6379 "failed to get optical module existence state, ret = %d.",
6383 existed = !!desc.data[0];
6389 hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset,
6390 uint32_t len, uint8_t *data)
6392 #define HNS3_SFP_INFO_CMD_NUM 6
6393 #define HNS3_SFP_INFO_MAX_LEN \
6394 (HNS3_SFP_INFO_BD0_LEN + \
6395 (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN)
6396 struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM];
6397 struct hns3_sfp_info_bd0_cmd *sfp_info_bd0;
6403 for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) {
6404 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM,
6406 if (i < HNS3_SFP_INFO_CMD_NUM - 1)
6407 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
6410 sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data;
6411 sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset);
6412 read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN);
6413 sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len);
6415 ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM);
6417 hns3_err(hw, "failed to get module EEPROM info, ret = %d.",
6422 /* The data format in BD0 is different from that of the other BDs. */
6423 copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN);
6424 memcpy(data, sfp_info_bd0->data, copy_len);
6425 read_len = copy_len;
6427 for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) {
6428 if (read_len >= len)
6431 copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN);
6432 memcpy(data + read_len, desc[i].data, copy_len);
6433 read_len += copy_len;
6436 return (int)read_len;
6440 hns3_get_module_eeprom(struct rte_eth_dev *dev,
6441 struct rte_dev_eeprom_info *info)
6443 struct hns3_adapter *hns = dev->data->dev_private;
6444 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6445 uint32_t offset = info->offset;
6446 uint32_t len = info->length;
6447 uint8_t *data = info->data;
6448 uint32_t read_len = 0;
6450 if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER)
6453 if (!hns3_optical_module_existed(hw)) {
6454 hns3_err(hw, "failed to read module EEPROM: no module is connected.");
6458 while (read_len < len) {
6460 ret = hns3_get_module_eeprom_data(hw, offset + read_len,
6472 hns3_get_module_info(struct rte_eth_dev *dev,
6473 struct rte_eth_dev_module_info *modinfo)
6475 #define HNS3_SFF8024_ID_SFP 0x03
6476 #define HNS3_SFF8024_ID_QSFP_8438 0x0c
6477 #define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d
6478 #define HNS3_SFF8024_ID_QSFP28_8636 0x11
6479 #define HNS3_SFF_8636_V1_3 0x03
6480 struct hns3_adapter *hns = dev->data->dev_private;
6481 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
6482 struct rte_dev_eeprom_info info;
6483 struct hns3_sfp_type sfp_type;
6486 memset(&sfp_type, 0, sizeof(sfp_type));
6487 memset(&info, 0, sizeof(info));
6488 info.data = (uint8_t *)&sfp_type;
6489 info.length = sizeof(sfp_type);
6490 ret = hns3_get_module_eeprom(dev, &info);
6494 switch (sfp_type.type) {
6495 case HNS3_SFF8024_ID_SFP:
6496 modinfo->type = RTE_ETH_MODULE_SFF_8472;
6497 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
6499 case HNS3_SFF8024_ID_QSFP_8438:
6500 modinfo->type = RTE_ETH_MODULE_SFF_8436;
6501 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
6503 case HNS3_SFF8024_ID_QSFP_8436_8636:
6504 if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
6505 modinfo->type = RTE_ETH_MODULE_SFF_8436;
6506 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
6508 modinfo->type = RTE_ETH_MODULE_SFF_8636;
6509 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
6512 case HNS3_SFF8024_ID_QSFP28_8636:
6513 modinfo->type = RTE_ETH_MODULE_SFF_8636;
6514 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
6517 hns3_err(hw, "unknown module, type = %u, extra_type = %u.",
6518 sfp_type.type, sfp_type.ext_type);
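/*
 * Illustrative application-side sketch (not part of the driver): identifying
 * the plugged module and dumping its EEPROM through the ethdev API, which
 * lands in hns3_get_module_info()/hns3_get_module_eeprom() above. The port id
 * is an assumption for the example only.
 *
 *    struct rte_eth_dev_module_info modinfo;
 *    struct rte_dev_eeprom_info eeprom;
 *    uint8_t buf[RTE_ETH_MODULE_SFF_8636_MAX_LEN];
 *
 *    if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *            memset(&eeprom, 0, sizeof(eeprom));
 *            eeprom.offset = 0;
 *            eeprom.length = modinfo.eeprom_len;
 *            eeprom.data = buf;
 *            (void)rte_eth_dev_get_module_eeprom(port_id, &eeprom);
 *    }
 */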
6525 static const struct eth_dev_ops hns3_eth_dev_ops = {
6526 .dev_configure = hns3_dev_configure,
6527 .dev_start = hns3_dev_start,
6528 .dev_stop = hns3_dev_stop,
6529 .dev_close = hns3_dev_close,
6530 .promiscuous_enable = hns3_dev_promiscuous_enable,
6531 .promiscuous_disable = hns3_dev_promiscuous_disable,
6532 .allmulticast_enable = hns3_dev_allmulticast_enable,
6533 .allmulticast_disable = hns3_dev_allmulticast_disable,
6534 .mtu_set = hns3_dev_mtu_set,
6535 .stats_get = hns3_stats_get,
6536 .stats_reset = hns3_stats_reset,
6537 .xstats_get = hns3_dev_xstats_get,
6538 .xstats_get_names = hns3_dev_xstats_get_names,
6539 .xstats_reset = hns3_dev_xstats_reset,
6540 .xstats_get_by_id = hns3_dev_xstats_get_by_id,
6541 .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
6542 .dev_infos_get = hns3_dev_infos_get,
6543 .fw_version_get = hns3_fw_version_get,
6544 .rx_queue_setup = hns3_rx_queue_setup,
6545 .tx_queue_setup = hns3_tx_queue_setup,
6546 .rx_queue_release = hns3_dev_rx_queue_release,
6547 .tx_queue_release = hns3_dev_tx_queue_release,
6548 .rx_queue_start = hns3_dev_rx_queue_start,
6549 .rx_queue_stop = hns3_dev_rx_queue_stop,
6550 .tx_queue_start = hns3_dev_tx_queue_start,
6551 .tx_queue_stop = hns3_dev_tx_queue_stop,
6552 .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
6553 .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
6554 .rxq_info_get = hns3_rxq_info_get,
6555 .txq_info_get = hns3_txq_info_get,
6556 .rx_burst_mode_get = hns3_rx_burst_mode_get,
6557 .tx_burst_mode_get = hns3_tx_burst_mode_get,
6558 .flow_ctrl_get = hns3_flow_ctrl_get,
6559 .flow_ctrl_set = hns3_flow_ctrl_set,
6560 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
6561 .mac_addr_add = hns3_add_mac_addr,
6562 .mac_addr_remove = hns3_remove_mac_addr,
6563 .mac_addr_set = hns3_set_default_mac_addr,
6564 .set_mc_addr_list = hns3_set_mc_mac_addr_list,
6565 .link_update = hns3_dev_link_update,
6566 .dev_set_link_up = hns3_dev_set_link_up,
6567 .dev_set_link_down = hns3_dev_set_link_down,
6568 .rss_hash_update = hns3_dev_rss_hash_update,
6569 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
6570 .reta_update = hns3_dev_rss_reta_update,
6571 .reta_query = hns3_dev_rss_reta_query,
6572 .flow_ops_get = hns3_dev_flow_ops_get,
6573 .vlan_filter_set = hns3_vlan_filter_set,
6574 .vlan_tpid_set = hns3_vlan_tpid_set,
6575 .vlan_offload_set = hns3_vlan_offload_set,
6576 .vlan_pvid_set = hns3_vlan_pvid_set,
6577 .get_reg = hns3_get_regs,
6578 .get_module_info = hns3_get_module_info,
6579 .get_module_eeprom = hns3_get_module_eeprom,
6580 .get_dcb_info = hns3_get_dcb_info,
6581 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
6582 .fec_get_capability = hns3_fec_get_capability,
6583 .fec_get = hns3_fec_get,
6584 .fec_set = hns3_fec_set,
6585 .tm_ops_get = hns3_tm_ops_get,
6586 .tx_done_cleanup = hns3_tx_done_cleanup,
6587 .timesync_enable = hns3_timesync_enable,
6588 .timesync_disable = hns3_timesync_disable,
6589 .timesync_read_rx_timestamp = hns3_timesync_read_rx_timestamp,
6590 .timesync_read_tx_timestamp = hns3_timesync_read_tx_timestamp,
6591 .timesync_adjust_time = hns3_timesync_adjust_time,
6592 .timesync_read_time = hns3_timesync_read_time,
6593 .timesync_write_time = hns3_timesync_write_time,
6596 static const struct hns3_reset_ops hns3_reset_ops = {
6597 .reset_service = hns3_reset_service,
6598 .stop_service = hns3_stop_service,
6599 .prepare_reset = hns3_prepare_reset,
6600 .wait_hardware_ready = hns3_wait_hardware_ready,
6601 .reinit_dev = hns3_reinit_dev,
6602 .restore_conf = hns3_restore_conf,
6603 .start_service = hns3_start_service,
6607 hns3_init_hw_ops(struct hns3_hw *hw)
6609 hw->ops.add_mc_mac_addr = hns3_add_mc_mac_addr;
6610 hw->ops.del_mc_mac_addr = hns3_remove_mc_mac_addr;
6611 hw->ops.add_uc_mac_addr = hns3_add_uc_mac_addr;
6612 hw->ops.del_uc_mac_addr = hns3_remove_uc_mac_addr;
6613 hw->ops.bind_ring_with_vector = hns3_bind_ring_with_vector;
6617 hns3_dev_init(struct rte_eth_dev *eth_dev)
6619 struct hns3_adapter *hns = eth_dev->data->dev_private;
6620 struct hns3_hw *hw = &hns->hw;
6623 PMD_INIT_FUNC_TRACE();
6625 hns3_flow_init(eth_dev);
6627 hns3_set_rxtx_function(eth_dev);
6628 eth_dev->dev_ops = &hns3_eth_dev_ops;
6629 eth_dev->rx_queue_count = hns3_rx_queue_count;
6630 ret = hns3_mp_init(eth_dev);
6634 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
6635 hns3_tx_push_init(eth_dev);
6639 hw->adapter_state = HNS3_NIC_UNINITIALIZED;
6641 hw->data = eth_dev->data;
6642 hns3_parse_devargs(eth_dev);
6645 * Set the default max packet size according to the default MTU
6646 * value in the DPDK framework.
6648 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
6650 ret = hns3_reset_init(hw);
6652 goto err_init_reset;
6653 hw->reset.ops = &hns3_reset_ops;
6655 hns3_init_hw_ops(hw);
6656 ret = hns3_init_pf(eth_dev);
6658 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
6662 ret = hns3_init_mac_addrs(eth_dev);
6664 goto err_init_mac_addrs;
6666 hw->adapter_state = HNS3_NIC_INITIALIZED;
6668 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
6670 hns3_err(hw, "Reschedule reset service after dev_init");
6671 hns3_schedule_reset(hns);
6673 /* IMP will wait for the ready flag before reset */
6674 hns3_notify_reset_ready(hw, false);
6677 hns3_info(hw, "hns3 dev initialization successful!");
6681 hns3_uninit_pf(eth_dev);
6684 rte_free(hw->reset.wait_data);
6687 hns3_mp_uninit(eth_dev);
6690 eth_dev->dev_ops = NULL;
6691 eth_dev->rx_pkt_burst = NULL;
6692 eth_dev->rx_descriptor_status = NULL;
6693 eth_dev->tx_pkt_burst = NULL;
6694 eth_dev->tx_pkt_prepare = NULL;
6695 eth_dev->tx_descriptor_status = NULL;
6700 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
6702 struct hns3_adapter *hns = eth_dev->data->dev_private;
6703 struct hns3_hw *hw = &hns->hw;
6705 PMD_INIT_FUNC_TRACE();
6707 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
6708 hns3_mp_uninit(eth_dev);
6712 if (hw->adapter_state < HNS3_NIC_CLOSING)
6713 hns3_dev_close(eth_dev);
6715 hw->adapter_state = HNS3_NIC_REMOVED;
6720 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
6721 struct rte_pci_device *pci_dev)
6723 return rte_eth_dev_pci_generic_probe(pci_dev,
6724 sizeof(struct hns3_adapter),
6729 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
6731 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
6734 static const struct rte_pci_id pci_id_hns3_map[] = {
6735 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
6736 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
6737 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
6738 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
6739 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
6740 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
6741 { .vendor_id = 0, }, /* sentinel */
6744 static struct rte_pci_driver rte_hns3_pmd = {
6745 .id_table = pci_id_hns3_map,
6746 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
6747 .probe = eth_hns3_pci_probe,
6748 .remove = eth_hns3_pci_remove,
6751 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
6752 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
6753 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
6754 RTE_PMD_REGISTER_PARAM_STRING(net_hns3,
6755 HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
6756 HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
6757 HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
6758 HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16> ");
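/*
 * Illustrative usage sketch (not part of the driver): the device arguments
 * registered above can be passed on the EAL command line, assuming the devarg
 * keys expand to "rx_func_hint", "tx_func_hint", "dev_caps_mask" and
 * "mbx_time_limit_ms" as documented in the hns3 guide. The PCI address and
 * values are assumptions for the example only.
 *
 *    dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common,mbx_time_limit_ms=600 -- -i
 */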
6759 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE);
6760 RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE);