/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"
#include "hns3_regs.h"

#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1

#define HNS3_SERVICE_INTERVAL		1000000 /* us */
#define HNS3_INVLID_PVID		0xFFFF

#define HNS3_FILTER_TYPE_VF		0
#define HNS3_FILTER_TYPE_PORT		1
#define HNS3_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HNS3_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HNS3_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HNS3_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HNS3_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HNS3_FILTER_FE_EGRESS		(HNS3_FILTER_FE_NIC_EGRESS_B \
					| HNS3_FILTER_FE_ROCE_EGRESS_B)
#define HNS3_FILTER_FE_INGRESS		(HNS3_FILTER_FE_NIC_INGRESS_B \
					| HNS3_FILTER_FE_ROCE_INGRESS_B)
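/*
 * HNS3_FILTER_FE_EGRESS/HNS3_FILTER_FE_INGRESS combine the NIC and ROCE
 * function enable bits; HNS3_FILTER_FE_EGRESS_V1_B is presumably the single
 * egress enable bit understood by the older (V1) filter layout.
 */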
/* Reset related Registers */
#define HNS3_GLOBAL_RESET_BIT		0
#define HNS3_CORE_RESET_BIT		1
#define HNS3_IMP_RESET_BIT		2
#define HNS3_FUN_RST_ING_B		0

#define HNS3_VECTOR0_IMP_RESET_INT_B	1
#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U

#define HNS3_RESET_WAIT_MS	100
#define HNS3_RESET_WAIT_CNT	200
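/*
 * Reset completion is polled in HNS3_RESET_WAIT_MS steps for at most
 * HNS3_RESET_WAIT_CNT iterations, i.e. up to 100 ms * 200 = 20 s in total.
 */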
enum hns3_evt_cause {
	HNS3_VECTOR0_EVENT_RST,
	HNS3_VECTOR0_EVENT_MBX,
	HNS3_VECTOR0_EVENT_ERR,
	HNS3_VECTOR0_EVENT_OTHER,
};

static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
						  uint64_t *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
				    int on);
static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);

static int hns3_add_mc_addr(struct hns3_hw *hw,
			    struct rte_ether_addr *mac_addr);
static int hns3_remove_mc_addr(struct hns3_hw *hw,
			       struct rte_ether_addr *mac_addr);

hns3_pf_disable_irq0(struct hns3_hw *hw)
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);

hns3_pf_enable_irq0(struct hns3_hw *hw)
	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
static enum hns3_evt_cause
hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
	struct hns3_hw *hw = &hns->hw;
	uint32_t vector0_int_stats;
	uint32_t cmdq_src_val;
	uint32_t hw_err_src_reg;
	uint32_t val;
	enum hns3_evt_cause ret;

	/* fetch the events from their corresponding regs */
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);

	/*
	 * Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed and the mailbox events are
	 * deferred. Since the RX CMDQ event is not cleared this time, the
	 * hardware will raise another interrupt just for the mailbox.
	 */
	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
		rte_atomic16_set(&hw->reset.disable_cmd, 1);
		hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
		if (clearval) {
			hw->reset.stats.imp_cnt++;
			hns3_warn(hw, "IMP reset detected, clear reset status");
		} else {
			hns3_schedule_delayed_reset(hns);
			hns3_warn(hw, "IMP reset detected, don't clear reset status");
		}
		ret = HNS3_VECTOR0_EVENT_RST;

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
		rte_atomic16_set(&hw->reset.disable_cmd, 1);
		hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
		val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
		if (clearval) {
			hw->reset.stats.global_cnt++;
			hns3_warn(hw, "Global reset detected, clear reset status");
		} else {
			hns3_schedule_delayed_reset(hns);
			hns3_warn(hw, "Global reset detected, don't clear reset status");
		}
		ret = HNS3_VECTOR0_EVENT_RST;

	/* check for vector0 msix event source */
	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
		val = vector0_int_stats | hw_err_src_reg;
		ret = HNS3_VECTOR0_EVENT_ERR;

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
		cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
		ret = HNS3_VECTOR0_EVENT_MBX;

	if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg))
		hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x",
			  vector0_int_stats, cmdq_src_val, hw_err_src_reg);
	val = vector0_int_stats;
	ret = HNS3_VECTOR0_EVENT_OTHER;
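	/*
	 * Note the ordering above: reset events take priority over MSI-X/RAS
	 * errors, which in turn take priority over mailbox events; whatever
	 * remains is classified as HNS3_VECTOR0_EVENT_OTHER.
	 */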
hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
	if (event_type == HNS3_VECTOR0_EVENT_RST)
		hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
	else if (event_type == HNS3_VECTOR0_EVENT_MBX)
		hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);

hns3_clear_all_event_cause(struct hns3_hw *hw)
	uint32_t vector0_int_stats;
	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);

	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during IMP reset interrupt");

	if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
		hns3_warn(hw, "Probe during Global reset interrupt");

	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
			       BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
			       BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
			       BIT(HNS3_VECTOR0_CORERESET_INT_B));
	hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);

hns3_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_evt_cause event_cause;
	uint32_t clearval = 0;

	/* Disable interrupt */
	hns3_pf_disable_irq0(hw);

	event_cause = hns3_check_event_cause(hns, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
		hns3_warn(hw, "Received err interrupt");
		hns3_handle_msix_error(hns, &hw->reset.request);
		hns3_handle_ras_error(hns, &hw->reset.request);
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
		hns3_warn(hw, "Received reset interrupt");
		hns3_schedule_reset(hns);
	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
		hns3_dev_handle_mbx_msg(hw);
	else
		hns3_err(hw, "Received unknown event");

	hns3_clear_event_cause(hw, event_cause, clearval);
	/* Enable interrupt if it is not caused by reset */
	hns3_pf_enable_irq0(hw);
hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
#define HNS3_VLAN_ID_OFFSET_STEP	160
#define HNS3_VLAN_BYTE_SIZE		8
	struct hns3_vlan_filter_pf_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	uint8_t vlan_offset_byte_val;
	struct hns3_cmd_desc desc;
	uint8_t vlan_offset_byte;
	uint8_t vlan_offset_base;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) /
			   HNS3_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE);
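	/*
	 * Worked example: vlan_id 2048 selects offset chunk 2048 / 160 = 12,
	 * byte (2048 % 160) / 8 = 16 inside that chunk, and bit value
	 * 1 << (2048 % 8) = 0x01 inside that byte.
	 */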
	req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_base;
	req->vlan_cfg = on ? 0 : 1;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
			 vlan_id, ret);

hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id) {
			if (vlan_entry->hd_tbl_status)
				hns3_set_port_vlan_filter(hns, vlan_id, 0);
			LIST_REMOVE(vlan_entry, next);
			rte_free(vlan_entry);

hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
			bool writen_to_tbl)
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->vlan_id == vlan_id)
			return;

	vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
	if (vlan_entry == NULL) {
		hns3_err(hw, "Failed to malloc hns3 vlan table");

	vlan_entry->hd_tbl_status = writen_to_tbl;
	vlan_entry->vlan_id = vlan_id;

	LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);

hns3_restore_vlan_table(struct hns3_adapter *hns)
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_pf *pf = &hns->pf;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE)
		return hns3_vlan_pvid_configure(hns,
						hw->port_base_vlan_cfg.pvid, 1);

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status) {
			vlan_id = vlan_entry->vlan_id;
			ret = hns3_set_port_vlan_filter(hns, vlan_id, 1);

hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
	struct hns3_hw *hw = &hns->hw;
	bool writen_to_tbl = false;

	/*
	 * When vlan filter is enabled, hardware regards vlan id 0 as the
	 * entry for normal packets; deleting vlan id 0 is not allowed.
	 */
	if (on == 0 && vlan_id == 0)
		return 0;

	/*
	 * When port based vlan is enabled, it is used as the vlan filter
	 * condition. In this case the vlan filter table is not updated when
	 * the user adds a new vlan or removes an existing one; only the vlan
	 * list is updated. The vlan ids in the vlan list are written to the
	 * vlan filter table once port based vlan is disabled.
	 */
	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
		writen_to_tbl = true;

	if (ret == 0 && vlan_id) {
		if (on)
			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
		else
			hns3_rm_dev_vlan_table(hns, vlan_id);

hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_filter_configure(hns, vlan_id, on);
	rte_spinlock_unlock(&hw->lock);

hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
			 uint16_t tpid)
	struct hns3_rx_vlan_type_cfg_cmd *rx_req;
	struct hns3_tx_vlan_type_cfg_cmd *tx_req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;

	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
	     vlan_type != ETH_VLAN_TYPE_OUTER)) {
		hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);

	if (tpid != RTE_ETHER_TYPE_VLAN) {
		hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;

	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
		rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
		rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
			 ret);

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
	tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
			 ret);

hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		   uint16_t tpid)
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
	rte_spinlock_unlock(&hw->lock);

hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_rx_vtag_cfg *vcfg)
	struct hns3_vport_vtag_rx_cfg_cmd *req;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
		     vcfg->strip_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
		     vcfg->strip_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
		     vcfg->vlan1_vlan_prionly ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
		     vcfg->vlan2_vlan_prionly ? 1 : 0);

	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);

hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_rx_vtag_cfg *vcfg)
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));

hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
			   struct hns3_tx_vtag_cfg *vcfg)
	struct hns3_pf *pf = &hns->pf;
	memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));

hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
	struct hns3_rx_vtag_cfg rxvlan_cfg;
	struct hns3_hw *hw = &hns->hw;

	if (hw->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
		rxvlan_cfg.strip_tag1_en = false;
		rxvlan_cfg.strip_tag2_en = enable;
	} else {
		rxvlan_cfg.strip_tag1_en = enable;
		rxvlan_cfg.strip_tag2_en = true;
	}

	rxvlan_cfg.vlan1_vlan_prionly = false;
	rxvlan_cfg.vlan2_vlan_prionly = false;
	rxvlan_cfg.rx_vlan_offload_en = enable;
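	/*
	 * When port based VLAN is disabled, the user setting only controls
	 * tag2 stripping; when it is enabled, tag2 is always stripped and the
	 * user setting controls tag1 instead, presumably because the PVID
	 * occupies one of the two tags.
	 */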
	ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
	if (ret)
		hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);

	hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);

hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
			  uint8_t fe_type, bool filter_en, uint8_t vf_id)
	struct hns3_vlan_filter_ctrl_cmd *req;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "set vlan filter fail, ret =%d", ret);

hns3_vlan_filter_init(struct hns3_adapter *hns)
	struct hns3_hw *hw = &hns->hw;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
					HNS3_FILTER_FE_EGRESS, false,
					0);
	if (ret)
		hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret);

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, false,
					0);
	if (ret)
		hns3_err(hw, "failed to init port vlan filter, ret = %d", ret);

hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
	struct hns3_hw *hw = &hns->hw;

	ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
					HNS3_FILTER_FE_INGRESS, enable,
					0);
	if (ret)
		hns3_err(hw, "failed to %s port vlan filter, ret = %d",
			 enable ? "enable" : "disable", ret);

hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_rxmode *rxmode;
	unsigned int tmp_mask;

	rte_spinlock_lock(&hw->lock);
	rxmode = &dev->data->dev_conf.rxmode;
	tmp_mask = (unsigned int)mask;
	if (tmp_mask & ETH_VLAN_FILTER_MASK) {
		/* ignore vlan filter configuration during promiscuous mode */
		if (!dev->data->promiscuous) {
			/* Enable or disable VLAN filter */
			enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ?
				 true : false;

			ret = hns3_enable_vlan_filter(hns, enable);
			if (ret) {
				rte_spinlock_unlock(&hw->lock);
				hns3_err(hw, "failed to %s rx filter, ret = %d",
					 enable ? "enable" : "disable", ret);
			}

	if (tmp_mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
			 true : false;

		ret = hns3_en_hw_strip_rxvtag(hns, enable);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			hns3_err(hw, "failed to %s rx strip, ret = %d",
				 enable ? "enable" : "disable", ret);
		}

	rte_spinlock_unlock(&hw->lock);

hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
			     struct hns3_tx_vtag_cfg *vcfg)
	struct hns3_vport_vtag_tx_cfg_cmd *req;
	struct hns3_cmd_desc desc;
	struct hns3_hw *hw = &hns->hw;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = vcfg->default_tag1;
	req->def_vlan_tag2 = vcfg->default_tag2;
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
		     vcfg->accept_tag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
		     vcfg->accept_untag1 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
		     vcfg->accept_tag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
		     vcfg->accept_untag2 ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
		     vcfg->insert_tag1_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
		     vcfg->insert_tag2_en ? 1 : 0);
	hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);

	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vport_id = HNS3_PF_FUNC_ID;
	req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
	bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
	req->vf_bitmap[req->vf_offset] = bitmap;

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);

hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
		     uint16_t pvid)
	struct hns3_hw *hw = &hns->hw;
	struct hns3_tx_vtag_cfg txvlan_cfg;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
		txvlan_cfg.accept_tag1 = true;
		txvlan_cfg.insert_tag1_en = false;
		txvlan_cfg.default_tag1 = 0;
	} else {
		txvlan_cfg.accept_tag1 = false;
		txvlan_cfg.insert_tag1_en = true;
		txvlan_cfg.default_tag1 = pvid;
	}

	txvlan_cfg.accept_untag1 = true;
	txvlan_cfg.accept_tag2 = true;
	txvlan_cfg.accept_untag2 = true;
	txvlan_cfg.insert_tag2_en = false;
	txvlan_cfg.default_tag2 = 0;
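	/*
	 * Summary of the Tx side: with a PVID configured the hardware inserts
	 * default_tag1 (the PVID) into transmitted packets and stops
	 * accepting packets that already carry tag1; without a PVID, tag1
	 * packets pass through and nothing is inserted.
	 */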
	ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
	if (ret)
		hns3_err(hw, "pf vlan set pvid failed, pvid = %u, ret = %d",
			 pvid, ret);

	hns3_update_tx_offload_cfg(hns, &txvlan_cfg);

hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on)
	struct hns3_hw *hw = &hns->hw;

	hw->port_base_vlan_cfg.state = on ?
	    HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;

	hw->port_base_vlan_cfg.pvid = pvid;

hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (vlan_entry->hd_tbl_status)
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);

		vlan_entry->hd_tbl_status = false;

	vlan_entry = LIST_FIRST(&pf->vlan_list);
	LIST_REMOVE(vlan_entry, next);
	rte_free(vlan_entry);
	vlan_entry = LIST_FIRST(&pf->vlan_list);

hns3_add_all_vlan_table(struct hns3_adapter *hns)
	struct hns3_user_vlan_table *vlan_entry;
	struct hns3_pf *pf = &hns->pf;

	LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
		if (!vlan_entry->hd_tbl_status)
			hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);

		vlan_entry->hd_tbl_status = true;

hns3_remove_all_vlan_table(struct hns3_adapter *hns)
	struct hns3_hw *hw = &hns->hw;

	hns3_rm_all_vlan_table(hns, true);
	if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) {
		ret = hns3_set_port_vlan_filter(hns,
						hw->port_base_vlan_cfg.pvid, 0);
		if (ret)
			hns3_err(hw, "Failed to remove all vlan table, ret =%d",
				 ret);

hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
				uint16_t port_base_vlan_state,
				uint16_t new_pvid, uint16_t old_pvid)
	struct hns3_hw *hw = &hns->hw;

	if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
		if (old_pvid != HNS3_INVLID_PVID && old_pvid != 0) {
			ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
			if (ret)
				hns3_err(hw,
					 "Failed to clear old pvid filter, ret = %d",
					 ret);

		hns3_rm_all_vlan_table(hns, false);
		return hns3_set_port_vlan_filter(hns, new_pvid, 1);

	ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
	if (ret)
		hns3_err(hw, "Failed to set port vlan filter, ret =%d",
			 ret);

	if (new_pvid == hw->port_base_vlan_cfg.pvid)
		hns3_add_all_vlan_table(hns);

hns3_en_pvid_strip(struct hns3_adapter *hns, int on)
	struct hns3_rx_vtag_cfg *old_cfg = &hns->pf.vtag_config.rx_vcfg;
	struct hns3_rx_vtag_cfg rx_vlan_cfg;

	rx_strip_en = old_cfg->rx_vlan_offload_en ? true : false;
	if (on) {
		rx_vlan_cfg.strip_tag1_en = rx_strip_en;
		rx_vlan_cfg.strip_tag2_en = true;
	} else {
		rx_vlan_cfg.strip_tag1_en = false;
		rx_vlan_cfg.strip_tag2_en = rx_strip_en;
	}
	rx_vlan_cfg.vlan1_vlan_prionly = false;
	rx_vlan_cfg.vlan2_vlan_prionly = false;
	rx_vlan_cfg.rx_vlan_offload_en = old_cfg->rx_vlan_offload_en;

	ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);

	hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);

hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
	struct hns3_hw *hw = &hns->hw;
	uint16_t port_base_vlan_state;

	if (on == 0 && pvid != hw->port_base_vlan_cfg.pvid) {
		if (hw->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID)
			hns3_warn(hw, "Invalid operation! As current pvid set "
				  "is %u, disable pvid %u is invalid",
				  hw->port_base_vlan_cfg.pvid, pvid);

	port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
				    HNS3_PORT_BASE_VLAN_DISABLE;
	ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
	if (ret)
		hns3_err(hw, "failed to config tx vlan for pvid, ret = %d",
			 ret);

	ret = hns3_en_pvid_strip(hns, on);
	if (ret)
		hns3_err(hw, "failed to config rx vlan strip for pvid, "
			 "ret = %d", ret);

	if (pvid == HNS3_INVLID_PVID)
		goto out;
	old_pvid = hw->port_base_vlan_cfg.pvid;
	ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
					      old_pvid);
	if (ret)
		hns3_err(hw, "Failed to update vlan filter entries, ret =%d",
			 ret);

out:
	hns3_store_port_base_vlan_info(hns, pvid, on);

hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool pvid_en_state_change;

	if (pvid > RTE_ETHER_MAX_VLAN_ID) {
		hns3_err(hw, "Invalid vlan_id = %u > %d", pvid,
			 RTE_ETHER_MAX_VLAN_ID);

	/*
	 * If the PVID configuration state changes, the PVID state in
	 * struct hns3_tx_queue/hns3_rx_queue should be refreshed as well.
	 */
	pvid_state = hw->port_base_vlan_cfg.state;
	if ((on && pvid_state == HNS3_PORT_BASE_VLAN_ENABLE) ||
	    (!on && pvid_state == HNS3_PORT_BASE_VLAN_DISABLE))
		pvid_en_state_change = false;
	else
		pvid_en_state_change = true;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_vlan_pvid_configure(hns, pvid, on);
	rte_spinlock_unlock(&hw->lock);

	if (pvid_en_state_change)
		hns3_update_all_queues_pvid_state(hw);

init_port_base_vlan_info(struct hns3_hw *hw)
	hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
	hw->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;

hns3_default_vlan_config(struct hns3_adapter *hns)
	struct hns3_hw *hw = &hns->hw;

	ret = hns3_set_port_vlan_filter(hns, 0, 1);
	if (ret)
		hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);

hns3_init_vlan_config(struct hns3_adapter *hns)
	struct hns3_hw *hw = &hns->hw;

	/*
	 * This function can be called during both initialization and the
	 * reset process. When called in the reset process, it means the
	 * hardware has been reset successfully and we need to restore the
	 * hardware configuration to ensure that it remains unchanged before
	 * and after the reset.
	 */
	if (rte_atomic16_read(&hw->reset.resetting) == 0)
		init_port_base_vlan_info(hw);

	ret = hns3_vlan_filter_init(hns);
	if (ret)
		hns3_err(hw, "vlan init fail in pf, ret =%d", ret);

	ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
				       RTE_ETHER_TYPE_VLAN);
	if (ret)
		hns3_err(hw, "tpid set fail in pf, ret =%d", ret);

	/*
	 * In the reinit dev stage of the reset process, the following
	 * vlan-related configurations may differ from those at
	 * initialization; they will be restored to hardware in
	 * hns3_restore_vlan_table and hns3_restore_vlan_conf later.
	 */
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
		if (ret)
			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);

		ret = hns3_en_hw_strip_rxvtag(hns, false);
		if (ret)
			hns3_err(hw, "rx strip configure fail in pf, ret =%d",
				 ret);

	return hns3_default_vlan_config(hns);

hns3_restore_vlan_conf(struct hns3_adapter *hns)
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;

	if (!hw->data->promiscuous) {
		/* restore vlan filter states */
		offloads = hw->data->dev_conf.rxmode.offloads;
		enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false;
		ret = hns3_enable_vlan_filter(hns, enable);
		if (ret)
			hns3_err(hw, "failed to restore vlan rx filter conf, "
				 "ret = %d", ret);

	ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret);

	ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg);
	if (ret)
		hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret);

hns3_dev_configure_vlan(struct rte_eth_dev *dev)
	struct hns3_adapter *hns = dev->data->dev_private;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_txmode *txmode;
	struct hns3_hw *hw = &hns->hw;

	txmode = &data->dev_conf.txmode;
	if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
		hns3_warn(hw,
			  "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
			  "configuration is not supported! Ignore these two "
			  "parameters: hw_vlan_reject_tagged(%d), "
			  "hw_vlan_reject_untagged(%d)",
			  txmode->hw_vlan_reject_tagged,
			  txmode->hw_vlan_reject_untagged);

	/* Apply vlan offload setting */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
	ret = hns3_vlan_offload_set(dev, mask);
	if (ret)
		hns3_err(hw, "dev config rx vlan offload failed, ret = %d",
			 ret);

	/*
	 * If no pvid config is set in rte_eth_conf, the driver need not set
	 * any VLAN pvid related configuration to hardware.
	 */
	if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0)
		return 0;

	/* Apply pvid setting */
	ret = hns3_vlan_pvid_set(dev, txmode->pvid,
				 txmode->hw_vlan_insert_pvid);
	if (ret)
		hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d",
			 txmode->pvid, ret);

hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
		unsigned int tso_mss_max)
	struct hns3_cfg_tso_status_cmd *req;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hns3_cfg_tso_status_cmd *)desc.data;

	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_min);
	req->tso_mss_min = rte_cpu_to_le_16(tso_mss);

	hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
		       tso_mss_max);
	req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
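	/*
	 * Note: the min and max MSS words share the same field layout, which
	 * is presumably why HNS3_TSO_MSS_MIN_M/HNS3_TSO_MSS_MIN_S are reused
	 * when packing tso_mss_max above.
	 */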
	return hns3_cmd_send(hw, &desc, 1);

hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
		   uint16_t *allocated_size, bool is_alloc)
	struct hns3_umv_spc_alc_cmd *req;
	struct hns3_cmd_desc desc;

	req = (struct hns3_umv_spc_alc_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
	hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
	req->space_size = rte_cpu_to_le_32(space_size);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
			     is_alloc ? "allocate" : "free", ret);

	if (is_alloc && allocated_size)
		*allocated_size = rte_le_to_cpu_32(desc.data[1]);

hns3_init_umv_space(struct hns3_hw *hw)
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t allocated_size = 0;

	ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
				 true);

	if (allocated_size < pf->wanted_umv_size)
		PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
			     pf->wanted_umv_size, allocated_size);

	pf->max_umv_size = (!!allocated_size) ? allocated_size :
			   pf->wanted_umv_size;
	pf->used_umv_size = 0;

hns3_uninit_umv_space(struct hns3_hw *hw)
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (pf->max_umv_size == 0)
		return;

	ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);

	pf->max_umv_size = 0;

hns3_is_umv_space_full(struct hns3_hw *hw)
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	is_full = (pf->used_umv_size >= pf->max_umv_size);

hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;

	if (is_free) {
		if (pf->used_umv_size > 0)
			pf->used_umv_size--;
	} else
		pf->used_umv_size++;

hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
		      const uint8_t *addr, bool is_mc)
	const unsigned char *mac_addr = addr;
	uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
			    ((uint32_t)mac_addr[2] << 16) |
			    ((uint32_t)mac_addr[1] << 8) |
			    (uint32_t)mac_addr[0];
	uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
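	/*
	 * E.g. the address 00:01:02:03:04:05 (addr[0] = 0x00 ... addr[5] =
	 * 0x05) packs into high_val = 0x03020100 and low_val = 0x0504.
	 */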
	hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
		hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
		hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
	new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);

hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
			     uint8_t resp_code,
			     enum hns3_mac_vlan_tbl_opcode op)
	if (cmdq_resp)
		hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
			 cmdq_resp);

	if (op == HNS3_MAC_VLAN_ADD) {
		if (resp_code == 0 || resp_code == 1) {
		} else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for uc_overflow");
		} else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
			hns3_err(hw, "add mac addr failed for mc_overflow");
		} else {
			hns3_err(hw, "add mac addr failed for undefined, code=%u",
				 resp_code);
		}
	} else if (op == HNS3_MAC_VLAN_REMOVE) {
		if (resp_code == 0) {
		} else if (resp_code == 1) {
			hns3_dbg(hw, "remove mac addr failed for miss");
		} else {
			hns3_err(hw, "remove mac addr failed for undefined, code=%u",
				 resp_code);
		}
	} else if (op == HNS3_MAC_VLAN_LKUP) {
		if (resp_code == 0) {
		} else if (resp_code == 1) {
			hns3_dbg(hw, "lookup mac addr failed for miss");
		} else {
			hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
				 resp_code);
		}
	}

	hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
		 op);

hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req,
			 struct hns3_cmd_desc *desc, bool is_mc)
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
					  true);
		ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
	} else {
		memcpy(desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, desc, 1);
	}
	if (ret)
		hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
			 ret);

	resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc[0].retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_LKUP);

hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
		      struct hns3_mac_vlan_tbl_entry_cmd *req,
		      struct hns3_cmd_desc *mc_desc)
	if (mc_desc == NULL) {
		struct hns3_cmd_desc desc;

		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
		memcpy(desc.data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		ret = hns3_cmd_send(hw, &desc, 1);
		resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(desc.retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	} else {
		hns3_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
		hns3_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
		mc_desc[0].retval = 0;
		ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
		resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = rte_le_to_cpu_16(mc_desc[0].retval);

		cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
							  HNS3_MAC_VLAN_ADD);
	}
	if (ret)
		hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);

hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
			 struct hns3_mac_vlan_tbl_entry_cmd *req)
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);

	resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
	retval = rte_le_to_cpu_16(desc.retval);

	return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
					    HNS3_MAC_VLAN_REMOVE);

hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	uint16_t egress_port = 0;

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
			 mac_str);

	memset(&req, 0, sizeof(req));

	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
		       HNS3_MAC_EPORT_VFID_S, vf_id);

	req.egress_port = rte_cpu_to_le_16(egress_port);

	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);

	/*
	 * Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, false);
	if (ret == -ENOENT) {
		if (!hns3_is_umv_space_full(hw)) {
			ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
			if (ret == 0)
				hns3_update_umv_space(hw, false);
			return ret;
		}

		hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
	}

	rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);

	/* check if we just hit the duplicate */
	if (ret == 0) {
		hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
		return 0;
	}

	hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
		 mac_str);

hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses */
		if (rte_is_same_ether_addr(addr, mac_addr)) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);

	ret = hns3_add_mc_addr(hw, mac_addr);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}

hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	ret = hns3_remove_mc_addr(hw, mac_addr);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d",
			 mac_str, ret);
	}

hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t idx, __rte_unused uint32_t pool)
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC mac addresses are added to
	 * firmware with different commands, so we must determine whether the
	 * input address is a UC or a MC address before choosing the command.
	 * By the way, it is recommended to call the API function
	 * rte_eth_dev_set_mc_addr_list to set MC mac addresses, because using
	 * rte_eth_dev_mac_addr_add to set a MC mac address may affect the
	 * available number of UC mac addresses.
	 */
	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_add_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_add_uc_addr_common(hw, mac_addr);

	if (ret) {
		rte_spinlock_unlock(&hw->lock);
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	if (idx == 0)
		hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	struct hns3_mac_vlan_tbl_entry_cmd req;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	/* check if mac addr is valid */
	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid",
			 mac_str);

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
	ret = hns3_remove_mac_vlan_tbl(hw, &req);
	if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
		return 0;
	else if (ret == 0)
		hns3_update_umv_space(hw, true);

hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hns3_remove_mc_addr_common(hw, mac_addr);
	else
		ret = hns3_remove_uc_addr_common(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}

hns3_set_default_mac_addr(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mac_addr)
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *oaddr;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	bool default_addr_setted;
	bool rm_succes = false;

	/*
	 * It has been guaranteed in the rte layer of the DPDK framework that
	 * the input parameter named mac_addr is a valid address.
	 */
	oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
	default_addr_setted = hw->mac.default_addr_setted;
	if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
		return 0;

	rte_spinlock_lock(&hw->lock);
	if (default_addr_setted) {
		ret = hns3_remove_uc_addr_common(hw, oaddr);
		if (ret) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      oaddr);
			hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
				  mac_str, ret);

	ret = hns3_add_uc_addr_common(hw, mac_addr);
	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
		goto err_add_uc_addr;
	}

	ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
	if (ret) {
		hns3_err(hw, "Failed to configure mac pause address: %d", ret);
		goto err_pause_addr_cfg;
	}

	rte_ether_addr_copy(mac_addr,
			    (struct rte_ether_addr *)hw->mac.mac_addr);
	hw->mac.default_addr_setted = true;
	rte_spinlock_unlock(&hw->lock);

err_pause_addr_cfg:
	ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
	if (ret_val) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_warn(hw,
			  "Failed to roll back to delete the set mac addr(%s): %d",
			  mac_str, ret_val);
	}

err_add_uc_addr:
	if (rm_succes) {
		ret_val = hns3_add_uc_addr_common(hw, oaddr);
		if (ret_val) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      oaddr);
			hns3_warn(hw,
				  "Failed to restore old uc mac addr(%s): %d",
				  mac_str, ret_val);
			hw->mac.default_addr_setted = false;
		}
	}
	rte_spinlock_unlock(&hw->lock);

hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;

	for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? hns3_remove_mc_addr(hw, addr) :
			      hns3_add_mc_addr(hw, addr);
		else
			ret = del ? hns3_remove_uc_addr_common(hw, addr) :
			      hns3_add_uc_addr_common(hw, addr);

		if (ret) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d "
				 "ret = %d.", del ? "remove" : "restore",
				 mac_str, i, ret);
		}
	}

hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
#define HNS3_VF_NUM_IN_FIRST_DESC 192
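	/*
	 * The vfid bitmap is split across descriptors: vfid 0~191 live in
	 * desc[1] and vfid 192~255 in desc[2], 32 bits per data word. E.g.
	 * vfid 200 maps to desc[2].data[(200 - 192) / 32] bit 200 % 32 = 8.
	 */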
	if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &=
			    rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[1].data[word_num] |=
			    rte_cpu_to_le_32(1UL << bit_num);
	} else {
		word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &=
			    rte_cpu_to_le_32(~(1UL << bit_num));
		else
			desc[2].data[word_num] |=
			    rte_cpu_to_le_32(1UL << bit_num);
	}

hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid",
			 mac_str);

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
	if (ret) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}

	/*
	 * In the current version VF is not supported when the PF is driven by
	 * the DPDK driver, so we only need to configure parameters for the PF
	 * vport.
	 */
	vf_id = HNS3_PF_FUNC_ID;
	hns3_update_desc_vfid(desc, vf_id, false);
	ret = hns3_add_mac_vlan_tbl(hw, &req, desc);
	if (ret) {
		if (ret == -ENOSPC)
			hns3_err(hw, "mc mac vlan table is full");
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret);
	}

hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
	struct hns3_mac_vlan_tbl_entry_cmd req;
	struct hns3_cmd_desc desc[3];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	/* Check if mac addr is valid */
	if (!rte_is_multicast_ether_addr(mac_addr)) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
			 mac_str);

	memset(&req, 0, sizeof(req));
	hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
	hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
	ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
	if (ret == 0) {
		/*
		 * This mac addr exists, remove this handle's VFID for it.
		 * In the current version VF is not supported when the PF is
		 * driven by the DPDK driver, so we only need to configure
		 * parameters for the PF vport.
		 */
		vf_id = HNS3_PF_FUNC_ID;
		hns3_update_desc_vfid(desc, vf_id, true);

		/* All the vfids are zero, so the entry has to be deleted */
		ret = hns3_remove_mac_vlan_tbl(hw, &req);
	} else if (ret == -ENOENT) {
		/* This mac addr doesn't exist. */
		return 0;
	}

	if (ret) {
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
	}

hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);

	/* Check if input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);

		/* Check if there are duplicate addresses */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				rte_ether_format_addr(mac_str,
						      RTE_ETHER_ADDR_FMT_SIZE,
						      addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. two same addrs(%s).",
					 mac_str);

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				rte_ether_format_addr(mac_str,
						      RTE_ETHER_ADDR_FMT_SIZE,
						      addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. addrs(%s) has already "
					 "been configured by the mac_addr add "
					 "API",
					 mac_str);

hns3_set_mc_addr_calc_addr(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   int mc_addr_num,
			   struct rte_ether_addr *reserved_addr_list,
			   int *reserved_addr_num,
			   struct rte_ether_addr *add_addr_list,
			   int *add_addr_num,
			   struct rte_ether_addr *rm_addr_list,
			   int *rm_addr_num)
	struct rte_ether_addr *addr;
	int current_addr_num;
	int reserved_num = 0;

	/* Calculate the mc mac address list that should be removed */
	current_addr_num = hw->mc_addrs_num;
	for (i = 0; i < current_addr_num; i++) {
		addr = &hw->mc_addrs[i];
		for (j = 0; j < mc_addr_num; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {

			rte_ether_addr_copy(addr, &rm_addr_list[rm_num]);
			rte_ether_addr_copy(addr,
					    &reserved_addr_list[reserved_num]);

	/* Calculate the mc mac address list that should be added */
	for (i = 0; i < mc_addr_num; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < current_addr_num; j++) {
			if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) {

			rte_ether_addr_copy(addr, &add_addr_list[add_num]);

	/* Reorder the mc mac address list maintained by the driver */
	for (i = 0; i < reserved_num; i++)
		rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]);

	for (i = 0; i < rm_num; i++) {
		num = reserved_num + i;
		rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]);
	}

	*reserved_addr_num = reserved_num;
	*add_addr_num = add_num;
	*rm_addr_num = rm_num;
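	/*
	 * After the reorder, hw->mc_addrs holds the reserved addresses at the
	 * front; the slots behind them still hold the removed addresses and
	 * are overwritten by the caller when it appends the newly added ones.
	 */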
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM];
	struct rte_ether_addr *addr;
	int reserved_addr_num;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);

	rte_spinlock_lock(&hw->lock);

	/*
	 * Calculate the mc mac address lists that should be removed and
	 * added, and reorder the mc mac address list maintained by the
	 * driver.
	 */
	mc_addr_num = (int)nb_mc_addr;
	hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num,
				   reserved_addr_list, &reserved_addr_num,
				   add_addr_list, &add_addr_num,
				   rm_addr_list, &rm_addr_num);

	/* Remove mc mac addresses */
	for (i = 0; i < rm_addr_num; i++) {
		num = rm_addr_num - i - 1;
		addr = &rm_addr_list[num];
		ret = hns3_remove_mc_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

	/* Add mc mac addresses */
	for (i = 0; i < add_addr_num; i++) {
		addr = &add_addr_list[i];
		ret = hns3_add_mc_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		num = reserved_addr_num + i;
		rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
	}

	rte_spinlock_unlock(&hw->lock);

hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hns3_remove_mc_addr(hw, addr);
		else
			ret = hns3_add_mc_addr(hw, addr);
		if (ret) {
			rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d",
				 del ? "Remove" : "Restore", mac_str, ret);
		}
	}

hns3_check_mq_mode(struct rte_eth_dev *dev)
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct rte_eth_dcb_tx_conf *dcb_tx_conf;

	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;

	if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
		hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
			 "rx_mq_mode = %d", rx_mq_mode);

	if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
			 "are not supported. rx_mq_mode = %d, tx_mq_mode = %d",
			 rx_mq_mode, tx_mq_mode);

	if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
				 dcb_rx_conf->nb_tcs, pf->tc_max);

		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
				 "nb_tcs(%d) != %d or %d in rx direction.",
				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);

		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);

		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
				hns3_err(hw, "dcb_tc[%d] = %d in rx direction "
					 "is not equal to the one in tx "
					 "direction.",
					 i, dcb_rx_conf->dcb_tc[i]);

			if (dcb_rx_conf->dcb_tc[i] > max_tc)
				max_tc = dcb_rx_conf->dcb_tc[i];
		}

		num_tc = max_tc + 1;
		if (num_tc > dcb_rx_conf->nb_tcs) {
			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
				 num_tc, dcb_rx_conf->nb_tcs);

hns3_check_dcb_cfg(struct rte_eth_dev *dev)
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!hns3_dev_dcb_supported(hw)) {
		hns3_err(hw, "this port does not support dcb configurations.");

	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
		hns3_err(hw, "MAC pause enabled, cannot config dcb info.");

	/* Check multiple queue mode */
	return hns3_check_mq_mode(dev);

hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
			   enum hns3_ring_type queue_type, uint16_t queue_id)
	struct hns3_cmd_desc desc;
	struct hns3_ctrl_vector_chain_cmd *req =
		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
	enum hns3_cmd_status status;
	enum hns3_opcode_type op;
	uint16_t tqp_type_and_id = 0;

	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
	hns3_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	if (queue_type == HNS3_RING_TYPE_RX)
		gl = HNS3_RING_GL_RX;
	else
		gl = HNS3_RING_GL_TX;

	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
		       queue_type);
	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
		       gl);
	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
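	/*
	 * The ring-to-vector mapping packs the queue type, queue id and GL
	 * (gap limiter) index into a single le16 word per chain entry.
	 */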
2205 req->int_cause_num = 1;
2206 op_str = mmap ? "Map" : "Unmap";
2207 status = hns3_cmd_send(hw, &desc, 1);
2209 hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
2210 op_str, queue_id, req->int_vector_id, status);
2218 hns3_init_ring_with_vector(struct hns3_hw *hw)
2225 * In hns3 network engine, vector 0 is always the misc interrupt of this
2226 * function, vector 1~N can be used respectively for the queues of the
2227 * function. Tx and Rx queues with the same number share the interrupt
2228 * vector. In the initialization clearing the all hardware mapping
2229 * relationship configurations between queues and interrupt vectors is
2230 * needed, so some error caused by the residual configurations, such as
2231 * the unexpected Tx interrupt, can be avoid.
2233 vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
2234 if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
2235 vec = vec - 1; /* the last interrupt is reserved */
2236 hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
2237 for (i = 0; i < hw->intr_tqps_num; i++) {
2239 * Set gap limiter/rate limiter/quanity limiter algorithm
2240 * configuration for interrupt coalesce of queue's interrupt.
2242 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
2243 HNS3_TQP_INTR_GL_DEFAULT);
2244 hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
2245 HNS3_TQP_INTR_GL_DEFAULT);
2246 hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
2247 hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
2249 ret = hns3_bind_ring_with_vector(hw, vec, false,
2250 HNS3_RING_TYPE_TX, i);
2252 PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
2253 "vector: %d, ret=%d", i, vec, ret);
2257 ret = hns3_bind_ring_with_vector(hw, vec, false,
2258 HNS3_RING_TYPE_RX, i);
2260 PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
2261 "vector: %d, ret=%d", i, vec, ret);
2270 hns3_dev_configure(struct rte_eth_dev *dev)
2272 struct hns3_adapter *hns = dev->data->dev_private;
2273 struct rte_eth_conf *conf = &dev->data->dev_conf;
2274 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
2275 struct hns3_hw *hw = &hns->hw;
2276 struct hns3_rss_conf *rss_cfg = &hw->rss_info;
2277 uint16_t nb_rx_q = dev->data->nb_rx_queues;
2278 uint16_t nb_tx_q = dev->data->nb_tx_queues;
2279 struct rte_eth_rss_conf rss_conf;
2285 * Hardware does not support individually enable/disable/reset the Tx or
2286 * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
2287 * and Rx queues at the same time. When the numbers of Tx queues
2288 * allocated by upper applications are not equal to the numbers of Rx
2289 * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
2290 * of Tx/Rx queues. otherwise, network engine can not work as usual. But
2291 * these fake queues are imperceptible, and can not be used by upper
2294 ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
2296 hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
2300 hw->adapter_state = HNS3_NIC_CONFIGURING;
2301 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
2302 hns3_err(hw, "setting link speed/duplex not supported");
2307 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
2308 ret = hns3_check_dcb_cfg(dev);
2313 /* When RSS is not configured, redirect the packet queue 0 */
2314 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
2315 conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
2316 rss_conf = conf->rx_adv_conf.rss_conf;
2317 if (rss_conf.rss_key == NULL) {
2318 rss_conf.rss_key = rss_cfg->key;
2319 rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
2322 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
2328 * If jumbo frames are enabled, MTU needs to be refreshed
2329 * according to the maximum RX packet length.
2331 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2333 * The validity of max_rx_pkt_len is guaranteed by the DPDK
2334 * framework. Its maximum value is HNS3_MAX_FRAME_LEN, so it can
2335 * safely be assigned to a "uint16_t" variable.
2337 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
2338 ret = hns3_dev_mtu_set(dev, mtu);
2341 dev->data->mtu = mtu;
2344 ret = hns3_dev_configure_vlan(dev);
2348 /* config hardware GRO */
2349 gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
2350 ret = hns3_config_gro(hw, gro_en);
2354 hns->rx_simple_allowed = true;
2355 hns->tx_simple_allowed = true;
2356 hns->tx_vec_allowed = true;
2358 hns3_init_rx_ptype_tble(dev);
2359 hw->adapter_state = HNS3_NIC_CONFIGURED;
2364 (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
2365 hw->adapter_state = HNS3_NIC_INITIALIZED;
2371 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
2373 struct hns3_config_max_frm_size_cmd *req;
2374 struct hns3_cmd_desc desc;
2376 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
2378 req = (struct hns3_config_max_frm_size_cmd *)desc.data;
2379 req->max_frm_size = rte_cpu_to_le_16(new_mps);
2380 req->min_frm_size = RTE_ETHER_MIN_LEN;
2382 return hns3_cmd_send(hw, &desc, 1);
2386 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
2390 ret = hns3_set_mac_mtu(hw, mps);
2392 hns3_err(hw, "Failed to set mtu, ret = %d", ret);
2396 ret = hns3_buffer_alloc(hw);
2398 hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
2404 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2406 struct hns3_adapter *hns = dev->data->dev_private;
2407 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
2408 struct hns3_hw *hw = &hns->hw;
2409 bool is_jumbo_frame;
2412 if (dev->data->dev_started) {
2413 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
2414 "before configuration", dev->data->port_id);
2418 rte_spinlock_lock(&hw->lock);
2419 is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false;
2420 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
2423 * The maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can
2424 * safely be assigned to a "uint16_t" variable.
2426 ret = hns3_config_mtu(hw, (uint16_t)frame_size);
2428 rte_spinlock_unlock(&hw->lock);
2429 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
2430 dev->data->port_id, mtu, ret);
2433 hns->pf.mps = (uint16_t)frame_size;
2435 dev->data->dev_conf.rxmode.offloads |=
2436 DEV_RX_OFFLOAD_JUMBO_FRAME;
2438 dev->data->dev_conf.rxmode.offloads &=
2439 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2440 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2441 rte_spinlock_unlock(&hw->lock);
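/*
 * Editor's sketch (not part of the driver): application-side usage of the
 * dev_started check above. hns3 rejects MTU changes while the port is
 * started, so a caller must stop the port first, e.g.:
 *
 *	rte_eth_dev_stop(port_id);
 *	ret = rte_eth_dev_set_mtu(port_id, new_mtu);
 *	if (ret == 0)
 *		ret = rte_eth_dev_start(port_id);
 *
 * port_id and new_mtu are hypothetical application variables.
 */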
2447 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
2449 struct hns3_adapter *hns = eth_dev->data->dev_private;
2450 struct hns3_hw *hw = &hns->hw;
2451 uint16_t queue_num = hw->tqps_num;
2454 * In interrupt mode, 'max_rx_queues' is set based on the number of
2455 * MSI-X interrupt resources of the hardware.
2457 if (hw->data->dev_conf.intr_conf.rxq == 1)
2458 queue_num = hw->intr_tqps_num;
2460 info->max_rx_queues = queue_num;
2461 info->max_tx_queues = hw->tqps_num;
2462 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
2463 info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
2464 info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
2465 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
2466 info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
2467 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
2468 DEV_RX_OFFLOAD_TCP_CKSUM |
2469 DEV_RX_OFFLOAD_UDP_CKSUM |
2470 DEV_RX_OFFLOAD_SCTP_CKSUM |
2471 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2472 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
2473 DEV_RX_OFFLOAD_KEEP_CRC |
2474 DEV_RX_OFFLOAD_SCATTER |
2475 DEV_RX_OFFLOAD_VLAN_STRIP |
2476 DEV_RX_OFFLOAD_VLAN_FILTER |
2477 DEV_RX_OFFLOAD_JUMBO_FRAME |
2478 DEV_RX_OFFLOAD_RSS_HASH |
2479 DEV_RX_OFFLOAD_TCP_LRO);
2480 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2481 DEV_TX_OFFLOAD_IPV4_CKSUM |
2482 DEV_TX_OFFLOAD_TCP_CKSUM |
2483 DEV_TX_OFFLOAD_UDP_CKSUM |
2484 DEV_TX_OFFLOAD_SCTP_CKSUM |
2485 DEV_TX_OFFLOAD_MULTI_SEGS |
2486 DEV_TX_OFFLOAD_TCP_TSO |
2487 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
2488 DEV_TX_OFFLOAD_GRE_TNL_TSO |
2489 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
2490 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
2491 hns3_txvlan_cap_get(hw));
2493 info->rx_desc_lim = (struct rte_eth_desc_lim) {
2494 .nb_max = HNS3_MAX_RING_DESC,
2495 .nb_min = HNS3_MIN_RING_DESC,
2496 .nb_align = HNS3_ALIGN_RING_DESC,
2499 info->tx_desc_lim = (struct rte_eth_desc_lim) {
2500 .nb_max = HNS3_MAX_RING_DESC,
2501 .nb_min = HNS3_MIN_RING_DESC,
2502 .nb_align = HNS3_ALIGN_RING_DESC,
2503 .nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
2504 .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
2507 info->default_rxconf = (struct rte_eth_rxconf) {
2508 .rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
2510 * If there are no available Rx buffer descriptors, incoming
2511 * packets are always dropped by hardware based on hns3 network
2517 info->default_txconf = (struct rte_eth_txconf) {
2518 .tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
2522 info->vmdq_queue_num = 0;
2524 info->reta_size = HNS3_RSS_IND_TBL_SIZE;
2525 info->hash_key_size = HNS3_RSS_KEY_SIZE;
2526 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
2528 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2529 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2530 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2531 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2532 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2533 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2539 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2542 struct hns3_adapter *hns = eth_dev->data->dev_private;
2543 struct hns3_hw *hw = &hns->hw;
2544 uint32_t version = hw->fw_version;
2547 ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
2548 hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
2549 HNS3_FW_VERSION_BYTE3_S),
2550 hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
2551 HNS3_FW_VERSION_BYTE2_S),
2552 hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
2553 HNS3_FW_VERSION_BYTE1_S),
2554 hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
2555 HNS3_FW_VERSION_BYTE0_S));
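/*
 * Editor's note: a worked example of the decoding above, assuming the
 * firmware packs one version field per byte of the 32-bit value. A raw
 * hw->fw_version of 0x010a0b0c would be rendered as "1.10.11.12", each
 * byte extracted by hns3_get_field() with the per-byte mask/shift pairs.
 */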
2556 ret += 1; /* add the size of '\0' */
2557 if (fw_size < (uint32_t)ret)
2564 hns3_dev_link_update(struct rte_eth_dev *eth_dev,
2565 __rte_unused int wait_to_complete)
2567 struct hns3_adapter *hns = eth_dev->data->dev_private;
2568 struct hns3_hw *hw = &hns->hw;
2569 struct hns3_mac *mac = &hw->mac;
2570 struct rte_eth_link new_link;
2572 if (!hns3_is_reset_pending(hns)) {
2573 hns3_update_speed_duplex(eth_dev);
2574 hns3_update_link_status(hw);
2577 memset(&new_link, 0, sizeof(new_link));
2578 switch (mac->link_speed) {
2579 case ETH_SPEED_NUM_10M:
2580 case ETH_SPEED_NUM_100M:
2581 case ETH_SPEED_NUM_1G:
2582 case ETH_SPEED_NUM_10G:
2583 case ETH_SPEED_NUM_25G:
2584 case ETH_SPEED_NUM_40G:
2585 case ETH_SPEED_NUM_50G:
2586 case ETH_SPEED_NUM_100G:
2587 case ETH_SPEED_NUM_200G:
2588 new_link.link_speed = mac->link_speed;
2591 new_link.link_speed = ETH_SPEED_NUM_100M;
2595 new_link.link_duplex = mac->link_duplex;
2596 new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
2597 new_link.link_autoneg =
2598 !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
2600 return rte_eth_linkstatus_set(eth_dev, &new_link);
2604 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2606 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2607 struct hns3_pf *pf = &hns->pf;
2609 if (!(status->pf_state & HNS3_PF_STATE_DONE))
2612 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
2618 hns3_query_function_status(struct hns3_hw *hw)
2620 #define HNS3_QUERY_MAX_CNT 10
2621 #define HNS3_QUERY_SLEEP_MSEC 1
2622 struct hns3_func_status_cmd *req;
2623 struct hns3_cmd_desc desc;
2627 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2628 req = (struct hns3_func_status_cmd *)desc.data;
2631 ret = hns3_cmd_send(hw, &desc, 1);
2633 PMD_INIT_LOG(ERR, "query function status failed %d",
2638 /* Check whether PF reset is done */
2642 rte_delay_ms(HNS3_QUERY_SLEEP_MSEC);
2643 } while (timeout++ < HNS3_QUERY_MAX_CNT);
2645 return hns3_parse_func_status(hw, req);
2649 hns3_query_pf_resource(struct hns3_hw *hw)
2651 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2652 struct hns3_pf *pf = &hns->pf;
2653 struct hns3_pf_res_cmd *req;
2654 struct hns3_cmd_desc desc;
2657 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2658 ret = hns3_cmd_send(hw, &desc, 1);
2660 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2664 req = (struct hns3_pf_res_cmd *)desc.data;
2665 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
2666 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
2667 hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
2668 pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
2670 if (req->tx_buf_size)
2672 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2674 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2676 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2678 if (req->dv_buf_size)
2680 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2682 pf->dv_buf_size = HNS3_DEFAULT_DV;
2684 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2687 hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
2688 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
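/*
 * Editor's note: the buffer sizes above are reported by firmware in
 * power-of-two units (HNS3_BUF_UNIT_S). Assuming a 128-byte unit
 * (HNS3_BUF_UNIT_S == 7), a raw buf_size of 0x80 corresponds to
 * 0x80 << 7 = 16384 bytes; the exact unit is an assumption drawn from
 * the other shift macros in this file.
 */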
2694 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
2696 struct hns3_cfg_param_cmd *req;
2697 uint64_t mac_addr_tmp_high;
2698 uint64_t mac_addr_tmp;
2701 req = (struct hns3_cfg_param_cmd *)desc[0].data;
2703 /* get the configuration */
2704 cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2705 HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);
2706 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2707 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
2708 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2709 HNS3_CFG_TQP_DESC_N_M,
2710 HNS3_CFG_TQP_DESC_N_S);
2712 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2713 HNS3_CFG_PHY_ADDR_M,
2714 HNS3_CFG_PHY_ADDR_S);
2715 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2716 HNS3_CFG_MEDIA_TP_M,
2717 HNS3_CFG_MEDIA_TP_S);
2718 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2719 HNS3_CFG_RX_BUF_LEN_M,
2720 HNS3_CFG_RX_BUF_LEN_S);
2721 /* get mac address */
2722 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
2723 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2724 HNS3_CFG_MAC_ADDR_H_M,
2725 HNS3_CFG_MAC_ADDR_H_S);
2727 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
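/*
 * Editor's note: the split shift above is arithmetically equivalent to
 * mac_addr_tmp_high << 32; it places the high MAC bits taken from
 * param[3] (presumably the top 16 bits of the 48-bit address) above the
 * low 32 bits taken from param[2].
 */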
2729 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2730 HNS3_CFG_DEFAULT_SPEED_M,
2731 HNS3_CFG_DEFAULT_SPEED_S);
2732 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2733 HNS3_CFG_RSS_SIZE_M,
2734 HNS3_CFG_RSS_SIZE_S);
2736 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
2737 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
2739 req = (struct hns3_cfg_param_cmd *)desc[1].data;
2740 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
2742 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2743 HNS3_CFG_SPEED_ABILITY_M,
2744 HNS3_CFG_SPEED_ABILITY_S);
2745 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2746 HNS3_CFG_UMV_TBL_SPACE_M,
2747 HNS3_CFG_UMV_TBL_SPACE_S);
2748 if (!cfg->umv_space)
2749 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
2752 /* hns3_get_board_cfg: query static parameters from the NCL_config file in flash
2753 * @hw: pointer to struct hns3_hw
2754 * @hcfg: the config structure to be filled
2757 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2759 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2760 struct hns3_cfg_param_cmd *req;
2765 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2767 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2768 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2770 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2771 i * HNS3_CFG_RD_LEN_BYTES);
2772 /* Len should be divided by 4 when sent to hardware */
2773 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2774 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2775 req->offset = rte_cpu_to_le_32(offset);
2778 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2780 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2784 hns3_parse_cfg(hcfg, desc);
2790 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2792 switch (speed_cmd) {
2793 case HNS3_CFG_SPEED_10M:
2794 *speed = ETH_SPEED_NUM_10M;
2796 case HNS3_CFG_SPEED_100M:
2797 *speed = ETH_SPEED_NUM_100M;
2799 case HNS3_CFG_SPEED_1G:
2800 *speed = ETH_SPEED_NUM_1G;
2802 case HNS3_CFG_SPEED_10G:
2803 *speed = ETH_SPEED_NUM_10G;
2805 case HNS3_CFG_SPEED_25G:
2806 *speed = ETH_SPEED_NUM_25G;
2808 case HNS3_CFG_SPEED_40G:
2809 *speed = ETH_SPEED_NUM_40G;
2811 case HNS3_CFG_SPEED_50G:
2812 *speed = ETH_SPEED_NUM_50G;
2814 case HNS3_CFG_SPEED_100G:
2815 *speed = ETH_SPEED_NUM_100G;
2817 case HNS3_CFG_SPEED_200G:
2818 *speed = ETH_SPEED_NUM_200G;
2828 hns3_set_default_dev_specifications(struct hns3_hw *hw)
2830 hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
2831 hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
2832 hw->rss_key_size = HNS3_RSS_KEY_SIZE;
2833 hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
2837 hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
2839 struct hns3_dev_specs_0_cmd *req0;
2841 req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
2843 hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
2844 hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
2845 hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
2846 hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
2850 hns3_query_dev_specifications(struct hns3_hw *hw)
2852 struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
2856 for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2857 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
2859 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
2861 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
2863 ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
2867 hns3_parse_dev_specifications(hw, desc);
2873 hns3_get_capability(struct hns3_hw *hw)
2875 struct rte_pci_device *pci_dev;
2876 struct rte_eth_dev *eth_dev;
2881 eth_dev = &rte_eth_devices[hw->data->port_id];
2882 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2883 device_id = pci_dev->id.device_id;
2885 if (device_id == HNS3_DEV_ID_25GE_RDMA ||
2886 device_id == HNS3_DEV_ID_50GE_RDMA ||
2887 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
2888 device_id == HNS3_DEV_ID_200G_RDMA)
2889 hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
2891 /* Get PCI revision id */
2892 ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
2893 HNS3_PCI_REVISION_ID);
2894 if (ret != HNS3_PCI_REVISION_ID_LEN) {
2895 PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
2899 hw->revision = revision;
2901 if (revision < PCI_REVISION_ID_HIP09_A) {
2902 hns3_set_default_dev_specifications(hw);
2903 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
2904 hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
2905 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
2906 hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
2910 ret = hns3_query_dev_specifications(hw);
2913 "failed to query dev specifications, ret = %d",
2918 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
2919 hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
2920 hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
2921 hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
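/*
 * Editor's summary of the two capability profiles selected above, as set
 * in this function:
 *
 *	revision	mapping mode	coalesce mode	GL unit
 *	< HIP09_A	VEC_RSV_ONE	NON_QL		2 us
 *	>= HIP09_A	VEC_ALL		QL		1 us
 *
 * HIP09 additionally takes its device specifications from the
 * HNS3_OPC_QUERY_DEV_SPECS command instead of the compiled-in defaults.
 */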
2927 hns3_get_board_configuration(struct hns3_hw *hw)
2929 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2930 struct hns3_pf *pf = &hns->pf;
2931 struct hns3_cfg cfg;
2934 ret = hns3_get_board_cfg(hw, &cfg);
2936 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
2940 if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER &&
2941 !hns3_dev_copper_supported(hw)) {
2942 PMD_INIT_LOG(ERR, "media type is copper, not supported.");
2946 hw->mac.media_type = cfg.media_type;
2947 hw->rss_size_max = cfg.rss_size_max;
2948 hw->rss_dis_flag = false;
2949 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
2950 hw->mac.phy_addr = cfg.phy_addr;
2951 hw->mac.default_addr_setted = false;
2952 hw->num_tx_desc = cfg.tqp_desc_num;
2953 hw->num_rx_desc = cfg.tqp_desc_num;
2954 hw->dcb_info.num_pg = 1;
2955 hw->dcb_info.hw_pfc_map = 0;
2957 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
2959 PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
2960 cfg.default_speed, ret);
2964 pf->tc_max = cfg.tc_num;
2965 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
2966 PMD_INIT_LOG(WARNING,
2967 "Get TC num(%u) from flash, set TC num to 1",
2972 /* Dev does not support DCB */
2973 if (!hns3_dev_dcb_supported(hw)) {
2977 pf->pfc_max = pf->tc_max;
2979 hw->dcb_info.num_tc = 1;
2980 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
2981 hw->tqps_num / hw->dcb_info.num_tc);
2982 hns3_set_bit(hw->hw_tc_map, 0, 1);
2983 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
2985 pf->wanted_umv_size = cfg.umv_space;
2991 hns3_get_configuration(struct hns3_hw *hw)
2995 ret = hns3_query_function_status(hw);
2997 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
3001 /* Get device capability */
3002 ret = hns3_get_capability(hw);
3004 PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
3008 /* Get pf resource */
3009 ret = hns3_query_pf_resource(hw);
3011 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
3015 ret = hns3_get_board_configuration(hw);
3017 PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
3023 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
3024 uint16_t tqp_vid, bool is_pf)
3026 struct hns3_tqp_map_cmd *req;
3027 struct hns3_cmd_desc desc;
3030 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
3032 req = (struct hns3_tqp_map_cmd *)desc.data;
3033 req->tqp_id = rte_cpu_to_le_16(tqp_pid);
3034 req->tqp_vf = func_id;
3035 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
3037 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
3038 req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
3040 ret = hns3_cmd_send(hw, &desc, 1);
3042 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
3048 hns3_map_tqp(struct hns3_hw *hw)
3050 uint16_t tqps_num = hw->total_tqps_num;
3059 * In the current version VF is not supported when the PF is driven by
3060 * the DPDK driver, so we allocate as many tqps to the PF as possible.
3063 num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
3064 for (func_id = HNS3_PF_FUNC_ID; func_id < num; func_id++) {
3065 is_pf = func_id == HNS3_PF_FUNC_ID ? true : false;
3067 i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
3068 ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
3079 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
3081 struct hns3_config_mac_speed_dup_cmd *req;
3082 struct hns3_cmd_desc desc;
3085 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
3087 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
3089 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
3092 case ETH_SPEED_NUM_10M:
3093 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3094 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
3096 case ETH_SPEED_NUM_100M:
3097 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3098 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
3100 case ETH_SPEED_NUM_1G:
3101 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3102 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
3104 case ETH_SPEED_NUM_10G:
3105 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3106 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
3108 case ETH_SPEED_NUM_25G:
3109 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3110 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
3112 case ETH_SPEED_NUM_40G:
3113 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3114 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
3116 case ETH_SPEED_NUM_50G:
3117 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3118 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
3120 case ETH_SPEED_NUM_100G:
3121 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3122 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
3124 case ETH_SPEED_NUM_200G:
3125 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
3126 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_200G);
3129 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
3133 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
3135 ret = hns3_cmd_send(hw, &desc, 1);
3137 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
3143 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3145 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3146 struct hns3_pf *pf = &hns->pf;
3147 struct hns3_priv_buf *priv;
3148 uint32_t i, total_size;
3150 total_size = pf->pkt_buf_size;
3152 /* alloc tx buffer for all enabled tc */
3153 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3154 priv = &buf_alloc->priv_buf[i];
3156 if (hw->hw_tc_map & BIT(i)) {
3157 if (total_size < pf->tx_buf_size)
3160 priv->tx_buf_size = pf->tx_buf_size;
3162 priv->tx_buf_size = 0;
3164 total_size -= priv->tx_buf_size;
3171 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3173 /* TX buffer size is allocated in units of 128 bytes */
3174 #define HNS3_BUF_SIZE_UNIT_SHIFT 7
3175 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15)
3176 struct hns3_tx_buff_alloc_cmd *req;
3177 struct hns3_cmd_desc desc;
3182 req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
3184 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
3185 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3186 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
3188 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
3189 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
3190 HNS3_BUF_SIZE_UPDATE_EN_MSK);
3193 ret = hns3_cmd_send(hw, &desc, 1);
3195 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
3201 hns3_get_tc_num(struct hns3_hw *hw)
3206 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3207 if (hw->hw_tc_map & BIT(i))
3213 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3215 struct hns3_priv_buf *priv;
3216 uint32_t rx_priv = 0;
3219 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3220 priv = &buf_alloc->priv_buf[i];
3222 rx_priv += priv->buf_size;
3228 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
3230 uint32_t total_tx_size = 0;
3233 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
3234 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
3236 return total_tx_size;
3239 /* Get the number of pfc enabled TCs, which have private buffer */
3241 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3243 struct hns3_priv_buf *priv;
3247 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3248 priv = &buf_alloc->priv_buf[i];
3249 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3256 /* Get the number of pfc disabled TCs, which have private buffer */
3258 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
3259 struct hns3_pkt_buf_alloc *buf_alloc)
3261 struct hns3_priv_buf *priv;
3265 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3266 priv = &buf_alloc->priv_buf[i];
3267 if (hw->hw_tc_map & BIT(i) &&
3268 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
3276 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
3279 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
3280 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3281 struct hns3_pf *pf = &hns->pf;
3282 uint32_t shared_buf, aligned_mps;
3287 tc_num = hns3_get_tc_num(hw);
3288 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3290 if (hns3_dev_dcb_supported(hw))
3291 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
3294 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
3297 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
3298 shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
3299 HNS3_BUF_SIZE_UNIT);
3301 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
3302 if (rx_all < rx_priv + shared_std)
3305 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
3306 buf_alloc->s_buf.buf_size = shared_buf;
3307 if (hns3_dev_dcb_supported(hw)) {
3308 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
3309 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
3310 - roundup(aligned_mps / HNS3_BUF_DIV_BY,
3311 HNS3_BUF_SIZE_UNIT);
3313 buf_alloc->s_buf.self.high =
3314 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3315 buf_alloc->s_buf.self.low = aligned_mps;
3318 if (hns3_dev_dcb_supported(hw)) {
3319 hi_thrd = shared_buf - pf->dv_buf_size;
3321 if (tc_num <= NEED_RESERVE_TC_NUM)
3322 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
3326 hi_thrd = hi_thrd / tc_num;
3328 hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
3329 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
3330 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
3332 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
3333 lo_thrd = aligned_mps;
3336 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3337 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
3338 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
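/*
 * Editor's worked example for the shared-buffer sizing above, using
 * hypothetical values and assuming HNS3_BUF_SIZE_UNIT is 256: mps = 1500
 * rounds up to aligned_mps = 1536, and with tc_num = 4,
 * shared_buf_tc = 4 * 1536 + 1536 = 7680 bytes. shared_std is the larger
 * of shared_buf_min and shared_buf_tc rounded up to the unit, and the
 * check fails if rx_all cannot cover rx_priv plus that standard.
 */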
3345 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
3346 struct hns3_pkt_buf_alloc *buf_alloc)
3348 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3349 struct hns3_pf *pf = &hns->pf;
3350 struct hns3_priv_buf *priv;
3351 uint32_t aligned_mps;
3355 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3356 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
3358 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3359 priv = &buf_alloc->priv_buf[i];
3366 if (!(hw->hw_tc_map & BIT(i)))
3370 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
3371 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
3372 priv->wl.high = roundup(priv->wl.low + aligned_mps,
3373 HNS3_BUF_SIZE_UNIT);
3376 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
3380 priv->buf_size = priv->wl.high + pf->dv_buf_size;
3383 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3387 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
3388 struct hns3_pkt_buf_alloc *buf_alloc)
3390 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3391 struct hns3_pf *pf = &hns->pf;
3392 struct hns3_priv_buf *priv;
3393 int no_pfc_priv_num;
3398 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3399 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
3401 /* let the last TC be cleared first */
3402 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3403 priv = &buf_alloc->priv_buf[i];
3404 mask = BIT((uint8_t)i);
3406 if (hw->hw_tc_map & mask &&
3407 !(hw->dcb_info.hw_pfc_map & mask)) {
3408 /* Clear the no pfc TC private buffer */
3416 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3417 no_pfc_priv_num == 0)
3421 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3425 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
3426 struct hns3_pkt_buf_alloc *buf_alloc)
3428 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3429 struct hns3_pf *pf = &hns->pf;
3430 struct hns3_priv_buf *priv;
3436 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3437 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
3439 /* let the last TC be cleared first */
3440 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
3441 priv = &buf_alloc->priv_buf[i];
3442 mask = BIT((uint8_t)i);
3444 if (hw->hw_tc_map & mask &&
3445 hw->dcb_info.hw_pfc_map & mask) {
3446 /* Reduce the number of pfc TC with private buffer */
3453 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
3458 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
3462 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
3463 struct hns3_pkt_buf_alloc *buf_alloc)
3465 #define COMPENSATE_BUFFER 0x3C00
3466 #define COMPENSATE_HALF_MPS_NUM 5
3467 #define PRIV_WL_GAP 0x1800
3468 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3469 struct hns3_pf *pf = &hns->pf;
3470 uint32_t tc_num = hns3_get_tc_num(hw);
3471 uint32_t half_mps = pf->mps >> 1;
3472 struct hns3_priv_buf *priv;
3473 uint32_t min_rx_priv;
3477 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
3479 rx_priv = rx_priv / tc_num;
3481 if (tc_num <= NEED_RESERVE_TC_NUM)
3482 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
3485 * Minimum value of private buffer in rx direction (min_rx_priv) is
3486 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
3487 * buffer if rx_priv is greater than min_rx_priv.
3489 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
3490 COMPENSATE_HALF_MPS_NUM * half_mps;
3491 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
3492 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
3494 if (rx_priv < min_rx_priv)
3497 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3498 priv = &buf_alloc->priv_buf[i];
3505 if (!(hw->hw_tc_map & BIT(i)))
3509 priv->buf_size = rx_priv;
3510 priv->wl.high = rx_priv - pf->dv_buf_size;
3511 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
3514 buf_alloc->s_buf.buf_size = 0;
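/*
 * Editor's worked example of the minimum above, using hypothetical
 * values: dv_buf_size = 12288 and mps = 1500 (half_mps = 750) give
 * min_rx_priv = 12288 + 0x3C00 (15360, the compensation buffer) +
 * 5 * 750 (i.e. 2.5 * MPS) = 31398 bytes, rounded up to the next
 * HNS3_BUF_SIZE_UNIT boundary.
 */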
3520 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
3521 * @hw: pointer to struct hns3_hw
3522 * @buf_alloc: pointer to buffer calculation data
3523 * @return: 0: calculation successful, negative: fail
3526 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3528 /* When DCB is not supported, rx private buffer is not allocated. */
3529 if (!hns3_dev_dcb_supported(hw)) {
3530 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3531 struct hns3_pf *pf = &hns->pf;
3532 uint32_t rx_all = pf->pkt_buf_size;
3534 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
3535 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
3542 * Try to allocate private packet buffer for all TCs without shared
3545 if (hns3_only_alloc_priv_buff(hw, buf_alloc))
3549 * Try to allocate private packet buffer for all TCs with shared
3552 if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
3556 * For different application scenarios, the enabled port number, TC
3557 * number and no_drop TC number are different. In order to obtain
3558 * better performance, software can allocate the buffer size and
3559 * configure the waterline by trying to decrease the private buffer
3560 * size in the order, namely, waterline of valid tc, pfc disabled tc, pfc
3563 if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
3566 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
3569 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
3576 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3578 struct hns3_rx_priv_buff_cmd *req;
3579 struct hns3_cmd_desc desc;
3584 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
3585 req = (struct hns3_rx_priv_buff_cmd *)desc.data;
3587 /* Alloc private buffer TCs */
3588 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3589 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
3592 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
3593 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
3596 buf_size = buf_alloc->s_buf.buf_size;
3597 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
3598 (1 << HNS3_TC0_PRI_BUF_EN_B));
3600 ret = hns3_cmd_send(hw, &desc, 1);
3602 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
3608 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3610 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
3611 struct hns3_rx_priv_wl_buf *req;
3612 struct hns3_priv_buf *priv;
3613 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
3617 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
3618 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
3620 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
3622 /* The first descriptor sets the NEXT bit to 1 */
3624 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3626 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3628 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3629 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
3631 priv = &buf_alloc->priv_buf[idx];
3632 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
3634 req->tc_wl[j].high |=
3635 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3636 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
3638 req->tc_wl[j].low |=
3639 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3643 /* Send 2 descriptors at one time */
3644 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
3646 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
3652 hns3_common_thrd_config(struct hns3_hw *hw,
3653 struct hns3_pkt_buf_alloc *buf_alloc)
3655 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3656 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3657 struct hns3_rx_com_thrd *req;
3658 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3659 struct hns3_tc_thrd *tc;
3664 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3665 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3667 req = (struct hns3_rx_com_thrd *)&desc[i].data;
3669 /* The first descriptor sets the NEXT bit to 1 */
3671 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3673 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3675 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3676 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3677 tc = &s_buf->tc_thrd[tc_idx];
3679 req->com_thrd[j].high =
3680 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3681 req->com_thrd[j].high |=
3682 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3683 req->com_thrd[j].low =
3684 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3685 req->com_thrd[j].low |=
3686 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3690 /* Send 2 descriptors at one time */
3691 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3693 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3699 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3701 struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3702 struct hns3_rx_com_wl *req;
3703 struct hns3_cmd_desc desc;
3706 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3708 req = (struct hns3_rx_com_wl *)desc.data;
3709 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3710 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3712 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3713 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3715 ret = hns3_cmd_send(hw, &desc, 1);
3717 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3723 hns3_buffer_alloc(struct hns3_hw *hw)
3725 struct hns3_pkt_buf_alloc pkt_buf;
3728 memset(&pkt_buf, 0, sizeof(pkt_buf));
3729 ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3732 "could not calc tx buffer size for all TCs %d",
3737 ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3739 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3743 ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3746 "could not calc rx priv buffer size for all TCs %d",
3751 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3753 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3757 if (hns3_dev_dcb_supported(hw)) {
3758 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3761 "could not configure rx private waterline %d",
3766 ret = hns3_common_thrd_config(hw, &pkt_buf);
3769 "could not configure common threshold %d",
3775 ret = hns3_common_wl_config(hw, &pkt_buf);
3777 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3784 hns3_mac_init(struct hns3_hw *hw)
3786 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3787 struct hns3_mac *mac = &hw->mac;
3788 struct hns3_pf *pf = &hns->pf;
3791 pf->support_sfp_query = true;
3792 mac->link_duplex = ETH_LINK_FULL_DUPLEX;
3793 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3795 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3799 mac->link_status = ETH_LINK_DOWN;
3801 return hns3_config_mtu(hw, pf->mps);
3805 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3807 #define HNS3_ETHERTYPE_SUCCESS_ADD 0
3808 #define HNS3_ETHERTYPE_ALREADY_ADD 1
3809 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2
3810 #define HNS3_ETHERTYPE_KEY_CONFLICT 3
3815 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
3820 switch (resp_code) {
3821 case HNS3_ETHERTYPE_SUCCESS_ADD:
3822 case HNS3_ETHERTYPE_ALREADY_ADD:
3825 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3827 "add mac ethertype failed for manager table overflow.");
3828 return_status = -EIO;
3830 case HNS3_ETHERTYPE_KEY_CONFLICT:
3831 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3832 return_status = -EIO;
3836 "add mac ethertype failed for undefined, code=%d.",
3838 return_status = -EIO;
3842 return return_status;
3846 hns3_add_mgr_tbl(struct hns3_hw *hw,
3847 const struct hns3_mac_mgr_tbl_entry_cmd *req)
3849 struct hns3_cmd_desc desc;
3854 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3855 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3857 ret = hns3_cmd_send(hw, &desc, 1);
3860 "add mac ethertype failed for cmd_send, ret =%d.",
3865 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3866 retval = rte_le_to_cpu_16(desc.retval);
3868 return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3872 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3873 int *table_item_num)
3875 struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3878 * In the current version, we add one item to the management table as below:
3879 * 0x0180C200000E -- LLDP MC address
3882 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3883 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3884 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3885 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
3886 tbl->i_port_bitmap = 0x1;
3887 *table_item_num = 1;
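/*
 * Editor's note on the split above: the 48-bit LLDP multicast address
 * 01:80:C2:00:00:0E is stored as a 32-bit high part (0x0180C200) and a
 * 16-bit low part (0x000E); each part is first put into network byte
 * order with htonl()/htons() and then passed through the CPU-to-little-
 * endian conversion used for command descriptors.
 */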
3891 hns3_init_mgr_tbl(struct hns3_hw *hw)
3893 #define HNS_MAC_MGR_TBL_MAX_SIZE 16
3894 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3899 memset(mgr_table, 0, sizeof(mgr_table));
3900 hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3901 for (i = 0; i < table_item_num; i++) {
3902 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3904 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3914 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3915 bool en_mc, bool en_bc, int vport_id)
3920 memset(param, 0, sizeof(struct hns3_promisc_param));
3922 param->enable = HNS3_PROMISC_EN_UC;
3924 param->enable |= HNS3_PROMISC_EN_MC;
3926 param->enable |= HNS3_PROMISC_EN_BC;
3927 param->vf_id = vport_id;
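/*
 * Editor's example of the flag building above:
 *
 *	hns3_promisc_param_init(&param, true, false, true, HNS3_PF_FUNC_ID);
 *
 * leaves param.enable = HNS3_PROMISC_EN_UC | HNS3_PROMISC_EN_BC, i.e.
 * unicast and broadcast promiscuity for the PF vport only.
 */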
3931 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3933 struct hns3_promisc_cfg_cmd *req;
3934 struct hns3_cmd_desc desc;
3937 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3939 req = (struct hns3_promisc_cfg_cmd *)desc.data;
3940 req->vf_id = param->vf_id;
3941 req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3942 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3944 ret = hns3_cmd_send(hw, &desc, 1);
3946 PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
3952 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3954 struct hns3_promisc_param param;
3955 bool en_bc_pmc = true;
3959 * In the current version VF is not supported when the PF is driven by
3960 * the DPDK driver; we just need to configure parameters for the PF vport.
3962 vf_id = HNS3_PF_FUNC_ID;
3964 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3965 return hns3_cmd_set_promisc_mode(hw, ¶m);
3969 hns3_promisc_init(struct hns3_hw *hw)
3971 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3972 struct hns3_pf *pf = &hns->pf;
3973 struct hns3_promisc_param param;
3977 ret = hns3_set_promisc_mode(hw, false, false);
3979 PMD_INIT_LOG(ERR, "failed to set promisc mode, ret = %d", ret);
3984 * In the current version VFs are not supported when the PF is driven by
3985 * the DPDK driver. After the PF has been taken over by DPDK, the original
3986 * VFs become invalid, so entry residues are possible. The driver should
3987 * clear the VFs' promisc mode to avoid unnecessary bandwidth usage
3990 for (func_id = HNS3_1ST_VF_FUNC_ID; func_id < pf->func_num; func_id++) {
3991 hns3_promisc_param_init(¶m, false, false, false, func_id);
3992 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
3994 PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode,"
3995 " ret = %d", func_id, ret);
4004 hns3_promisc_uninit(struct hns3_hw *hw)
4006 struct hns3_promisc_param param;
4010 func_id = HNS3_PF_FUNC_ID;
4013 * In the current version VFs are not supported when the PF is driven by
4014 * the DPDK driver, and the VFs' promisc mode status has been cleared
4015 * during init and will not change. So just clear the PF's promisc
4016 * mode status during uninit.
4018 hns3_promisc_param_init(¶m, false, false, false, func_id);
4019 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
4021 PMD_INIT_LOG(ERR, "failed to clear promisc status during"
4022 " uninit, ret = %d", ret);
4026 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
4028 bool allmulti = dev->data->all_multicast ? true : false;
4029 struct hns3_adapter *hns = dev->data->dev_private;
4030 struct hns3_hw *hw = &hns->hw;
4035 rte_spinlock_lock(&hw->lock);
4036 ret = hns3_set_promisc_mode(hw, true, true);
4038 rte_spinlock_unlock(&hw->lock);
4039 hns3_err(hw, "failed to enable promiscuous mode, ret = %d",
4045 * When promiscuous mode is enabled, disable the vlan filter so that
4046 * all incoming packets are accepted in the receiving direction.
4048 offloads = dev->data->dev_conf.rxmode.offloads;
4049 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
4050 ret = hns3_enable_vlan_filter(hns, false);
4052 hns3_err(hw, "failed to enable promiscuous mode due to "
4053 "failure to disable vlan filter, ret = %d",
4055 err = hns3_set_promisc_mode(hw, false, allmulti);
4057 hns3_err(hw, "failed to restore promiscuous "
4058 "status after disable vlan filter "
4059 "failed during enabling promiscuous "
4060 "mode, ret = %d", ret);
4064 rte_spinlock_unlock(&hw->lock);
4070 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
4072 bool allmulti = dev->data->all_multicast ? true : false;
4073 struct hns3_adapter *hns = dev->data->dev_private;
4074 struct hns3_hw *hw = &hns->hw;
4079 /* If now in all_multicast mode, must remain in all_multicast mode. */
4080 rte_spinlock_lock(&hw->lock);
4081 ret = hns3_set_promisc_mode(hw, false, allmulti);
4083 rte_spinlock_unlock(&hw->lock);
4084 hns3_err(hw, "failed to disable promiscuous mode, ret = %d",
4088 /* When promiscuous mode is disabled, restore the vlan filter status */
4089 offloads = dev->data->dev_conf.rxmode.offloads;
4090 if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
4091 ret = hns3_enable_vlan_filter(hns, true);
4093 hns3_err(hw, "failed to disable promiscuous mode due to"
4094 " failure to restore vlan filter, ret = %d",
4096 err = hns3_set_promisc_mode(hw, true, true);
4098 hns3_err(hw, "failed to restore promiscuous "
4099 "status after enabling vlan filter "
4100 "failed during disabling promiscuous "
4101 "mode, ret = %d", ret);
4104 rte_spinlock_unlock(&hw->lock);
4110 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
4112 struct hns3_adapter *hns = dev->data->dev_private;
4113 struct hns3_hw *hw = &hns->hw;
4116 if (dev->data->promiscuous)
4119 rte_spinlock_lock(&hw->lock);
4120 ret = hns3_set_promisc_mode(hw, false, true);
4121 rte_spinlock_unlock(&hw->lock);
4123 hns3_err(hw, "failed to enable allmulticast mode, ret = %d",
4130 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
4132 struct hns3_adapter *hns = dev->data->dev_private;
4133 struct hns3_hw *hw = &hns->hw;
4136 /* If now in promiscuous mode, must remain in all_multicast mode. */
4137 if (dev->data->promiscuous)
4140 rte_spinlock_lock(&hw->lock);
4141 ret = hns3_set_promisc_mode(hw, false, false);
4142 rte_spinlock_unlock(&hw->lock);
4144 hns3_err(hw, "failed to disable allmulticast mode, ret = %d",
4151 hns3_dev_promisc_restore(struct hns3_adapter *hns)
4153 struct hns3_hw *hw = &hns->hw;
4154 bool allmulti = hw->data->all_multicast ? true : false;
4157 if (hw->data->promiscuous) {
4158 ret = hns3_set_promisc_mode(hw, true, true);
4160 hns3_err(hw, "failed to restore promiscuous mode, "
4165 ret = hns3_set_promisc_mode(hw, false, allmulti);
4167 hns3_err(hw, "failed to restore allmulticast mode, ret = %d",
4173 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
4175 struct hns3_sfp_speed_cmd *resp;
4176 struct hns3_cmd_desc desc;
4179 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true);
4180 resp = (struct hns3_sfp_speed_cmd *)desc.data;
4181 ret = hns3_cmd_send(hw, &desc, 1);
4182 if (ret == -EOPNOTSUPP) {
4183 hns3_err(hw, "IMP do not support get SFP speed %d", ret);
4186 hns3_err(hw, "get sfp speed failed %d", ret);
4190 *speed = resp->sfp_speed;
4196 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
4198 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
4199 duplex = ETH_LINK_FULL_DUPLEX;
4205 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
4207 struct hns3_mac *mac = &hw->mac;
4210 duplex = hns3_check_speed_dup(duplex, speed);
4211 if (mac->link_speed == speed && mac->link_duplex == duplex)
4214 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
4218 mac->link_speed = speed;
4219 mac->link_duplex = duplex;
4225 hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
4227 struct hns3_adapter *hns = eth_dev->data->dev_private;
4228 struct hns3_hw *hw = &hns->hw;
4229 struct hns3_pf *pf = &hns->pf;
4233 /* If IMP does not support getting SFP/qSFP speed, return directly */
4234 if (!pf->support_sfp_query)
4237 ret = hns3_get_sfp_speed(hw, &speed);
4238 if (ret == -EOPNOTSUPP) {
4239 pf->support_sfp_query = false;
4244 if (speed == ETH_SPEED_NUM_NONE)
4245 return 0; /* do nothing if no SFP */
4247 /* Config full duplex for SFP */
4248 return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
4252 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
4254 struct hns3_config_mac_mode_cmd *req;
4255 struct hns3_cmd_desc desc;
4256 uint32_t loop_en = 0;
4260 req = (struct hns3_config_mac_mode_cmd *)desc.data;
4262 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
4265 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
4266 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
4267 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
4268 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
4269 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
4270 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
4271 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
4272 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
4273 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
4274 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
4277 * If DEV_RX_OFFLOAD_KEEP_CRC offload is set, MAC will not strip CRC
4278 * when receiving frames. Otherwise, CRC will be stripped.
4280 if (hw->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4281 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, 0);
4283 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
4284 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
4285 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
4286 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
4287 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
4289 ret = hns3_cmd_send(hw, &desc, 1);
4291 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
4297 hns3_get_mac_link_status(struct hns3_hw *hw)
4299 struct hns3_link_status_cmd *req;
4300 struct hns3_cmd_desc desc;
4304 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
4305 ret = hns3_cmd_send(hw, &desc, 1);
4307 hns3_err(hw, "get link status cmd failed %d", ret);
4308 return ETH_LINK_DOWN;
4311 req = (struct hns3_link_status_cmd *)desc.data;
4312 link_status = req->status & HNS3_LINK_STATUS_UP_M;
4314 return !!link_status;
4318 hns3_update_link_status(struct hns3_hw *hw)
4322 state = hns3_get_mac_link_status(hw);
4323 if (state != hw->mac.link_status) {
4324 hw->mac.link_status = state;
4325 hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
4330 hns3_service_handler(void *param)
4332 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
4333 struct hns3_adapter *hns = eth_dev->data->dev_private;
4334 struct hns3_hw *hw = &hns->hw;
4336 if (!hns3_is_reset_pending(hns)) {
4337 hns3_update_speed_duplex(eth_dev);
4338 hns3_update_link_status(hw);
4340 hns3_warn(hw, "Cancel the query when reset is pending");
4342 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4346 hns3_init_hardware(struct hns3_adapter *hns)
4348 struct hns3_hw *hw = &hns->hw;
4351 ret = hns3_map_tqp(hw);
4353 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
4357 ret = hns3_init_umv_space(hw);
4359 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
4363 ret = hns3_mac_init(hw);
4365 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
4369 ret = hns3_init_mgr_tbl(hw);
4371 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
4375 ret = hns3_promisc_init(hw);
4377 PMD_INIT_LOG(ERR, "Failed to init promisc: %d",
4382 ret = hns3_init_vlan_config(hns);
4384 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
4388 ret = hns3_dcb_init(hw);
4390 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
4394 ret = hns3_init_fd_config(hns);
4396 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
4400 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
4402 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
4406 ret = hns3_config_gro(hw, false);
4408 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
4413 * During initialization, all hardware mapping relationships between
4414 * queues and interrupt vectors need to be cleared, so that errors
4415 * caused by residual configurations, such as unexpected interrupts,
4416 * can be avoided.
4418 ret = hns3_init_ring_with_vector(hw);
4420 PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
4427 hns3_uninit_umv_space(hw);
4432 hns3_clear_hw(struct hns3_hw *hw)
4434 struct hns3_cmd_desc desc;
4437 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CLEAR_HW_STATE, false);
4439 ret = hns3_cmd_send(hw, &desc, 1);
4440 if (ret && ret != -EOPNOTSUPP)
4447 hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
4452 * The new firmware supports reporting more hardware error types
4453 * through MSI-X mode. These errors are defined as RAS errors in
4454 * hardware and belong to a different type from the MSI-X errors
4455 * processed by the network driver.
4457 * The network driver should enable the new error report on initialization
4459 val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
4460 hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
4461 hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
4465 hns3_init_pf(struct rte_eth_dev *eth_dev)
4467 struct rte_device *dev = eth_dev->device;
4468 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4469 struct hns3_adapter *hns = eth_dev->data->dev_private;
4470 struct hns3_hw *hw = &hns->hw;
4473 PMD_INIT_FUNC_TRACE();
4475 /* Get hardware io base address from pcie BAR2 IO space */
4476 hw->io_base = pci_dev->mem_resource[2].addr;
4478 /* Firmware command queue initialization */
4479 ret = hns3_cmd_init_queue(hw);
4481 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
4482 goto err_cmd_init_queue;
4485 hns3_clear_all_event_cause(hw);
4487 /* Firmware command initialization */
4488 ret = hns3_cmd_init(hw);
4490 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
4495 * To ensure that the hardware environment is clean during
4496 * initialization, the driver actively clears the hardware environment,
4497 * including the PF's and corresponding VFs' vlan, mac and flow table
4498 * configurations, etc.
4500 ret = hns3_clear_hw(hw);
4502 PMD_INIT_LOG(ERR, "failed to clear hardware: %d", ret);
4506 hns3_config_all_msix_error(hw, true);
4508 ret = rte_intr_callback_register(&pci_dev->intr_handle,
4509 hns3_interrupt_handler,
4512 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
4513 goto err_intr_callback_register;
4516 /* Enable interrupt */
4517 rte_intr_enable(&pci_dev->intr_handle);
4518 hns3_pf_enable_irq0(hw);
4520 /* Get configuration */
4521 ret = hns3_get_configuration(hw);
4523 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
4524 goto err_get_config;
4527 ret = hns3_init_hardware(hns);
4529 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
4530 goto err_get_config;
4533 /* Initialize flow director filter list & hash */
4534 ret = hns3_fdir_filter_init(hns);
4536 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
4540 hns3_set_default_rss_args(hw);
4542 ret = hns3_enable_hw_error_intr(hns, true);
4544 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
4552 hns3_fdir_filter_uninit(hns);
4554 hns3_uninit_umv_space(hw);
4557 hns3_pf_disable_irq0(hw);
4558 rte_intr_disable(&pci_dev->intr_handle);
4559 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
4561 err_intr_callback_register:
4563 hns3_cmd_uninit(hw);
4564 hns3_cmd_destroy_queue(hw);
4572 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
4574 struct hns3_adapter *hns = eth_dev->data->dev_private;
4575 struct rte_device *dev = eth_dev->device;
4576 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4577 struct hns3_hw *hw = &hns->hw;
4579 PMD_INIT_FUNC_TRACE();
4581 hns3_enable_hw_error_intr(hns, false);
4582 hns3_rss_uninit(hns);
4583 (void)hns3_config_gro(hw, false);
4584 hns3_promisc_uninit(hw);
4585 hns3_fdir_filter_uninit(hns);
4586 hns3_uninit_umv_space(hw);
4587 hns3_pf_disable_irq0(hw);
4588 rte_intr_disable(&pci_dev->intr_handle);
4589 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
4591 hns3_config_all_msix_error(hw, false);
4592 hns3_cmd_uninit(hw);
4593 hns3_cmd_destroy_queue(hw);
4598 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
4600 struct hns3_hw *hw = &hns->hw;
4603 ret = hns3_dcb_cfg_update(hns);
4608 ret = hns3_start_queues(hns, reset_queue);
4610 PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
4615 ret = hns3_cfg_mac_mode(hw, true);
4617 PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
4618 goto err_config_mac_mode;
4622 err_config_mac_mode:
4623 hns3_stop_queues(hns, true);
4628 hns3_map_rx_interrupt(struct rte_eth_dev *dev)
4630 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4631 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4632 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4633 uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
4634 uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
4635 uint32_t intr_vector;
4639 if (dev->data->dev_conf.intr_conf.rxq == 0)
4642 /* disable uio/vfio intr/eventfd mapping */
4643 rte_intr_disable(intr_handle);
4645 /* check and configure queue intr-vector mapping */
4646 if (rte_intr_cap_multiple(intr_handle) ||
4647 !RTE_ETH_DEV_SRIOV(dev).active) {
4648 intr_vector = hw->used_rx_queues;
4649 /* creates event fd for each intr vector when MSIX is used */
4650 if (rte_intr_efd_enable(intr_handle, intr_vector))
4653 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
4654 intr_handle->intr_vec =
4655 rte_zmalloc("intr_vec",
4656 hw->used_rx_queues * sizeof(int), 0);
4657 if (intr_handle->intr_vec == NULL) {
4658 hns3_err(hw, "Failed to allocate %d rx_queues"
4659 " intr_vec", hw->used_rx_queues);
4661 goto alloc_intr_vec_error;
4665 if (rte_intr_allow_others(intr_handle)) {
4666 vec = RTE_INTR_VEC_RXTX_OFFSET;
4667 base = RTE_INTR_VEC_RXTX_OFFSET;
4669 if (rte_intr_dp_is_en(intr_handle)) {
4670 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
4671 ret = hns3_bind_ring_with_vector(hw, vec, true,
4675 goto bind_vector_error;
4676 intr_handle->intr_vec[q_id] = vec;
4677 if (vec < base + intr_handle->nb_efd - 1)
4681 rte_intr_enable(intr_handle);
4685 rte_intr_efd_disable(intr_handle);
4686 if (intr_handle->intr_vec) {
4687 free(intr_handle->intr_vec);
4688 intr_handle->intr_vec = NULL;
4691 alloc_intr_vec_error:
4692 rte_intr_efd_disable(intr_handle);
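/*
 * Editor's note on the vector numbering above: when "other" interrupts
 * are allowed, queue vectors start at RTE_INTR_VEC_RXTX_OFFSET (vector 0
 * stays with the misc interrupt), and each Rx queue is bound to the next
 * vector until nb_efd - 1 vectors are used, after which the remaining
 * queues share the last vector.
 */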
4697 hns3_restore_rx_interrupt(struct hns3_hw *hw)
4699 struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
4700 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4701 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4705 if (dev->data->dev_conf.intr_conf.rxq == 0)
4708 if (rte_intr_dp_is_en(intr_handle)) {
4709 for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
4710 ret = hns3_bind_ring_with_vector(hw,
4711 intr_handle->intr_vec[q_id], true,
4712 HNS3_RING_TYPE_RX, q_id);
4722 hns3_restore_filter(struct rte_eth_dev *dev)
4724 hns3_restore_rss_filter(dev);
static int
hns3_dev_start(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_atomic16_read(&hw->reset.resetting))
		return -EBUSY;

	rte_spinlock_lock(&hw->lock);
	hw->adapter_state = HNS3_NIC_STARTING;

	ret = hns3_do_start(hns, true);
	if (ret) {
		hw->adapter_state = HNS3_NIC_CONFIGURED;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	ret = hns3_map_rx_interrupt(dev);
	if (ret) {
		hw->adapter_state = HNS3_NIC_CONFIGURED;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	hw->adapter_state = HNS3_NIC_STARTED;
	rte_spinlock_unlock(&hw->lock);

	hns3_rx_scattered_calc(dev);
	hns3_set_rxtx_function(dev);
	hns3_mp_req_start_rxtx(dev);
	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);

	hns3_restore_filter(dev);

	/* Enable interrupt of all rx queues before enabling queues */
	hns3_dev_all_rx_queue_intr_enable(hw, true);
	/*
	 * After initialization is finished, enable queues to receive and
	 * transmit packets.
	 */
	hns3_enable_all_queues(hw, true);

	hns3_info(hw, "hns3 dev start successful!");
	return 0;
}

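/*
 * Datapath-down counterpart of hns3_do_start(): disable the MAC, mark the
 * link down, and remove the unicast MAC addresses only while the command
 * queue is still usable (i.e. no reset has disabled it).
 */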
static int
hns3_do_stop(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	bool reset_queue;
	int ret;

	ret = hns3_cfg_mac_mode(hw, false);
	if (ret)
		return ret;
	hw->mac.link_status = ETH_LINK_DOWN;

	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
		hns3_configure_all_mac_addr(hns, true);
		reset_queue = true;
	} else
		reset_queue = false;
	hw->mac.default_addr_setted = false;
	return hns3_stop_queues(hns, reset_queue);
}

static void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hns3_bind_ring_with_vector(hw, vec, false,
							 HNS3_RING_TYPE_RX,
							 q_id);
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

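/*
 * Stop order matters here: re-select the burst functions for the stopping
 * state and tell secondary processes to stop Rx/Tx before touching the
 * queues, then wait roughly one millisecond per TQP so threads still inside
 * the old burst functions can drain.
 */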
static void
hns3_dev_stop(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_state = HNS3_NIC_STOPPING;
	hns3_set_rxtx_function(dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(dev);
	/* Prevent crashes when queues are still in use. */
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
		hns3_do_stop(hns);
		hns3_unmap_rx_interrupt(dev);
		hns3_dev_release_mbufs(hns);
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	}
	hns3_rx_scattered_reset(dev);
	rte_eal_alarm_cancel(hns3_service_handler, dev);
	rte_spinlock_unlock(&hw->lock);
}

static void
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		rte_free(eth_dev->process_private);
		eth_dev->process_private = NULL;
		return;
	}

	if (hw->adapter_state == HNS3_NIC_STARTED)
		hns3_dev_stop(eth_dev);

	hw->adapter_state = HNS3_NIC_CLOSING;
	hns3_reset_abort(hns);
	hw->adapter_state = HNS3_NIC_CLOSED;

	hns3_configure_all_mc_mac_addr(hns, true);
	hns3_remove_all_vlan_table(hns);
	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
	hns3_uninit_pf(eth_dev);
	hns3_free_all_queues(eth_dev);
	rte_free(hw->reset.wait_data);
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	hns3_mp_uninit_primary();
	hns3_warn(hw, "Close port %d finished", hw->data->port_id);
}

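/* Report the currently configured MAC pause (802.3x flow control) mode. */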
static int
hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	fc_conf->pause_time = pf->pause_time;

	/* return fc current mode */
	switch (hw->current_mode) {
	case HNS3_FC_FULL:
		fc_conf->mode = RTE_FC_FULL;
		break;
	case HNS3_FC_TX_PAUSE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case HNS3_FC_RX_PAUSE:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case HNS3_FC_NONE:
	default:
		fc_conf->mode = RTE_FC_NONE;
		break;
	}

	return 0;
}

static void
hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
{
	switch (mode) {
	case RTE_FC_NONE:
		hw->requested_mode = HNS3_FC_NONE;
		break;
	case RTE_FC_RX_PAUSE:
		hw->requested_mode = HNS3_FC_RX_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		hw->requested_mode = HNS3_FC_TX_PAUSE;
		break;
	case RTE_FC_FULL:
		hw->requested_mode = HNS3_FC_FULL;
		break;
	default:
		hw->requested_mode = HNS3_FC_NONE;
		hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
			  "configured to RTE_FC_NONE", mode);
		break;
	}
}

static int
hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	if (fc_conf->high_water || fc_conf->low_water ||
	    fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
		hns3_err(hw, "Unsupported flow control settings specified, "
			 "high_water(%u), low_water(%u), send_xon(%u) and "
			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
			 fc_conf->high_water, fc_conf->low_water,
			 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
		return -EINVAL;
	}
	if (fc_conf->autoneg) {
		hns3_err(hw, "Unsupported fc auto-negotiation setting.");
		return -EINVAL;
	}
	if (!fc_conf->pause_time) {
		hns3_err(hw, "Invalid pause time %d setting.",
			 fc_conf->pause_time);
		return -EINVAL;
	}

	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
	    hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
		hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
			 "current_fc_status = %d", hw->current_fc_status);
		return -EOPNOTSUPP;
	}

	hns3_get_fc_mode(hw, fc_conf->mode);
	if (hw->requested_mode == hw->current_mode &&
	    pf->pause_time == fc_conf->pause_time)
		return 0;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_fc_enable(dev, fc_conf);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			    struct rte_eth_pfc_conf *pfc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint8_t priority;
	int ret;

	if (!hns3_dev_dcb_supported(hw)) {
		hns3_err(hw, "This port does not support dcb configurations.");
		return -EOPNOTSUPP;
	}

	if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
	    pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
		hns3_err(hw, "Unsupported flow control settings specified, "
			 "high_water(%u), low_water(%u), send_xon(%u) and "
			 "mac_ctrl_frame_fwd(%u) must be set to '0'",
			 pfc_conf->fc.high_water, pfc_conf->fc.low_water,
			 pfc_conf->fc.send_xon,
			 pfc_conf->fc.mac_ctrl_frame_fwd);
		return -EINVAL;
	}
	if (pfc_conf->fc.autoneg) {
		hns3_err(hw, "Unsupported fc auto-negotiation setting.");
		return -EINVAL;
	}
	if (pfc_conf->fc.pause_time == 0) {
		hns3_err(hw, "Invalid pause time %d setting.",
			 pfc_conf->fc.pause_time);
		return -EINVAL;
	}

	if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
	    hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
		hns3_err(hw, "MAC pause is enabled. Cannot set PFC. "
			 "current_fc_status = %d", hw->current_fc_status);
		return -EOPNOTSUPP;
	}

	priority = pfc_conf->priority;
	hns3_get_fc_mode(hw, pfc_conf->fc.mode);
	if (hw->dcb_info.pfc_en & BIT(priority) &&
	    hw->requested_mode == hw->current_mode &&
	    pfc_conf->fc.pause_time == pf->pause_time)
		return 0;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_dcb_pfc_enable(dev, pfc_conf);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i;

	rte_spinlock_lock(&hw->lock);
	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = pf->local_max_tc;
	else
		dcb_info->nb_tcs = 1;

	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
	for (i = 0; i < dcb_info->nb_tcs; i++)
		dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];

	for (i = 0; i < hw->num_tc; i++) {
		dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;
		dcb_info->tc_queue.tc_txq[0][i].base =
						hw->tc_queue[i].tqp_offset;
		dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;
		dcb_info->tc_queue.tc_txq[0][i].nb_queue =
						hw->tc_queue[i].tqp_count;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_reinit_dev(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_cmd_init(hw);
	if (ret) {
		hns3_err(hw, "Failed to init cmd: %d", ret);
		return ret;
	}

	ret = hns3_reset_all_queues(hns);
	if (ret) {
		hns3_err(hw, "Failed to reset all queues: %d", ret);
		return ret;
	}

	ret = hns3_init_hardware(hns);
	if (ret) {
		hns3_err(hw, "Failed to init hardware: %d", ret);
		return ret;
	}

	ret = hns3_enable_hw_error_intr(hns, true);
	if (ret) {
		hns3_err(hw, "Failed to enable hw error interrupts: %d",
			 ret);
		return ret;
	}

	hns3_info(hw, "Reset done, driver initialization finished.");
	return 0;
}

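/*
 * Poll the reset-status register that matches the reset level in progress;
 * as long as the corresponding bit is still set, hardware has not finished
 * the reset.
 */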
static bool
is_pf_reset_done(struct hns3_hw *hw)
{
	uint32_t val, reg, reg_bit;

	switch (hw->reset.level) {
	case HNS3_IMP_RESET:
		reg = HNS3_GLOBAL_RESET_REG;
		reg_bit = HNS3_IMP_RESET_BIT;
		break;
	case HNS3_GLOBAL_RESET:
		reg = HNS3_GLOBAL_RESET_REG;
		reg_bit = HNS3_GLOBAL_RESET_BIT;
		break;
	case HNS3_FUNC_RESET:
		reg = HNS3_FUN_RST_ING;
		reg_bit = HNS3_FUN_RST_ING_B;
		break;
	case HNS3_FLR_RESET:
	default:
		hns3_err(hw, "Wait for unsupported reset level: %d",
			 hw->reset.level);
		return true;
	}

	val = hns3_read_dev(hw, reg);
	if (hns3_get_bit(val, reg_bit))
		return false;
	else
		return true;
}

bool
hns3_is_reset_pending(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset;

	hns3_check_event_cause(hns, NULL);
	reset = hns3_get_reset_level(hns, &hw->reset.pending);
	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is pending", reset);
		return true;
	}
	reset = hns3_get_reset_level(hns, &hw->reset.request);
	if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) {
		hns3_warn(hw, "High level reset %d is requested", reset);
		return true;
	}
	return false;
}

static int
hns3_wait_hardware_ready(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_wait_data *wait_data = hw->reset.wait_data;
	struct timeval tv;

	if (wait_data->result == HNS3_WAIT_SUCCESS)
		return 0;
	else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
		gettimeofday(&tv, NULL);
		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return -ETIME;
	} else if (wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	wait_data->hns = hns;
	wait_data->check_completion = is_pf_reset_done;
	wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT *
			    HNS3_RESET_WAIT_MS + get_timeofday_ms();
	wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC;
	wait_data->count = HNS3_RESET_WAIT_CNT;
	wait_data->result = HNS3_WAIT_REQUEST;
	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
	return -EAGAIN;
}

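/*
 * Ask firmware to trigger a function-level reset; func_id selects the
 * function to reset, HNS3_PF_FUNC_ID for the PF itself.
 */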
static int
hns3_func_reset_cmd(struct hns3_hw *hw, int func_id)
{
	struct hns3_cmd_desc desc;
	struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
	hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	return hns3_cmd_send(hw, &desc, 1);
}

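/*
 * Request an IMP (management processor) reset. The opcode 0xFFFE and the
 * payload 0xeedd are raw, firmware-defined magic values rather than named
 * command constants.
 */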
static int
hns3_imp_reset_cmd(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
	desc.data[0] = 0xeedd;

	return hns3_cmd_send(hw, &desc, 1);
}

static void
hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	uint32_t val;

	gettimeofday(&tv, NULL);
	if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) ||
	    hns3_read_dev(hw, HNS3_FUN_RST_ING)) {
		hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		return;
	}

	switch (reset_level) {
	case HNS3_IMP_RESET:
		hns3_imp_reset_cmd(hw);
		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		break;
	case HNS3_GLOBAL_RESET:
		val = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
		hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1);
		hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val);
		hns3_warn(hw, "Global Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		break;
	case HNS3_FUNC_RESET:
		hns3_warn(hw, "PF Reset requested time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		/* schedule again to check later */
		hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending);
		hns3_schedule_reset(hns);
		break;
	default:
		hns3_warn(hw, "Unsupported reset level: %d", reset_level);
		return;
	}
	hns3_atomic_clear_bit(reset_level, &hw->reset.request);
}

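/*
 * Pick the highest-priority reset level set in @levels. Returns
 * HNS3_NONE_RESET when a reset of the same or higher level is already being
 * handled, so a lower-level request never preempts it.
 */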
static enum hns3_reset_level
hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
{
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level = HNS3_NONE_RESET;

	/* Return the highest priority reset level amongst all */
	if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels))
		reset_level = HNS3_IMP_RESET;
	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
		reset_level = HNS3_GLOBAL_RESET;
	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
		reset_level = HNS3_FUNC_RESET;
	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
		reset_level = HNS3_FLR_RESET;

	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
		return HNS3_NONE_RESET;

	return reset_level;
}

static void
hns3_record_imp_error(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;

	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
		hns3_warn(hw, "Detected IMP RD poison!");
		hns3_error_int_stats_add(hns, "IMP_RD_POISON_INT_STS");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}

	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
		hns3_warn(hw, "Detected IMP CMDQ error!");
		hns3_error_int_stats_add(hns, "CMDQ_MEM_ECC_INT_STS");
		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
	}
}

static int
hns3_prepare_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint32_t reg_val;
	int ret;

	switch (hw->reset.level) {
	case HNS3_FUNC_RESET:
		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
		if (ret)
			return ret;

		/*
		 * After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hns3_cmd_init is called.
		 */
		rte_atomic16_set(&hw->reset.disable_cmd, 1);
		hw->reset.stats.request_cnt++;
		break;
	case HNS3_IMP_RESET:
		hns3_record_imp_error(hns);
		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
		break;
	default:
		break;
	}

	return 0;
}

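/*
 * Tell firmware that the driver has finished its part of the reset
 * handling; used after IMP and global resets before services restart.
 */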
static int
hns3_set_rst_done(struct hns3_hw *hw)
{
	struct hns3_pf_rst_done_cmd *req;
	struct hns3_cmd_desc desc;

	req = (struct hns3_pf_rst_done_cmd *)desc.data;
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_stop_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	if (hw->adapter_state == HNS3_NIC_STARTED)
		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
	hw->mac.link_status = ETH_LINK_DOWN;

	hns3_set_rxtx_function(eth_dev);
	rte_wmb();
	/* Disable datapath on secondary process. */
	hns3_mp_req_stop_rxtx(eth_dev);
	rte_delay_ms(hw->tqps_num);

	rte_spinlock_lock(&hw->lock);
	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
	    hw->adapter_state == HNS3_NIC_STOPPING) {
		hns3_do_stop(hns);
		hw->reset.mbuf_deferred_free = true;
	} else
		hw->reset.mbuf_deferred_free = false;

	/*
	 * It is cumbersome for hardware to pick-and-choose entries for
	 * deletion from table space. Hence, for function reset software
	 * intervention is required to delete the entries.
	 */
	if (rte_atomic16_read(&hw->reset.disable_cmd) == 0)
		hns3_configure_all_mc_mac_addr(hns, true);
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

static int
hns3_start_service(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct rte_eth_dev *eth_dev;

	if (hw->reset.level == HNS3_IMP_RESET ||
	    hw->reset.level == HNS3_GLOBAL_RESET)
		hns3_set_rst_done(hw);
	eth_dev = &rte_eth_devices[hw->data->port_id];
	hns3_set_rxtx_function(eth_dev);
	hns3_mp_req_start_rxtx(eth_dev);
	if (hw->adapter_state == HNS3_NIC_STARTED) {
		hns3_service_handler(eth_dev);

		/* Enable interrupt of all rx queues before enabling queues */
		hns3_dev_all_rx_queue_intr_enable(hw, true);
		/*
		 * After initialization is finished, enable queues to receive
		 * and transmit packets.
		 */
		hns3_enable_all_queues(hw, true);
	}

	return 0;
}

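/*
 * Replay all user configuration lost by the reset: MAC addresses,
 * promiscuous state, VLAN tables, flow director rules, Rx interrupt
 * bindings and GRO. Any failure unwinds the MAC address tables again.
 */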
static int
hns3_restore_conf(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = hns3_configure_all_mac_addr(hns, false);
	if (ret)
		return ret;

	ret = hns3_configure_all_mc_mac_addr(hns, false);
	if (ret)
		goto err_mc_mac;

	ret = hns3_dev_promisc_restore(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_table(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_vlan_conf(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_all_fdir_filter(hns);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_rx_interrupt(hw);
	if (ret)
		goto err_promisc;

	ret = hns3_restore_gro_conf(hw);
	if (ret)
		goto err_promisc;

	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
		ret = hns3_do_start(hns, false);
		if (ret)
			goto err_promisc;
		hns3_info(hw, "hns3 dev restart successful!");
	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
		hw->adapter_state = HNS3_NIC_CONFIGURED;
	return 0;

err_promisc:
	hns3_configure_all_mc_mac_addr(hns, true);
err_mc_mac:
	hns3_configure_all_mac_addr(hns, true);
	return ret;
}

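/*
 * Delayed-work entry point for reset handling. It runs in three phases:
 * recover an interrupt that may have been lost while handling was deferred,
 * drive any reset already pending in hardware, and finally issue resets
 * that were merely requested.
 */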
static void
hns3_reset_service(void *param)
{
	struct hns3_adapter *hns = (struct hns3_adapter *)param;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_reset_level reset_level;
	struct timeval tv_delta;
	struct timeval tv_start;
	struct timeval tv;
	uint64_t msec;
	int ret;

	/*
	 * If the interrupt was not triggered within the delay time, it may
	 * have been lost; handle it here to recover from the error.
	 */
	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) {
		rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
		hns3_err(hw, "Handling interrupts in delayed tasks");
		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
		if (reset_level == HNS3_NONE_RESET) {
			hns3_err(hw, "No reset level is set, try IMP reset");
			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
		}
	}
	rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);

	/*
	 * Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for hardware to complete the reset.
	 * a. If we are able to figure out in reasonable time that hardware
	 *    has fully reset, we can proceed with the driver/client reset.
	 * b. Else, we can come back later to check this status and
	 *    re-schedule now.
	 */
	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
	if (reset_level != HNS3_NONE_RESET) {
		gettimeofday(&tv_start, NULL);
		ret = hns3_reset_process(hns, reset_level);
		gettimeofday(&tv, NULL);
		timersub(&tv, &tv_start, &tv_delta);
		msec = tv_delta.tv_sec * MSEC_PER_SEC +
		       tv_delta.tv_usec / USEC_PER_MSEC;
		if (msec > HNS3_RESET_PROCESS_MS)
			hns3_err(hw, "%d handle long time delta %" PRIx64
				     " ms time=%ld.%.6ld",
				 hw->reset.level, msec,
				 tv.tv_sec, tv.tv_usec);
		if (ret == -EAGAIN)
			return;
	}

	/* Check if we got any *new* reset requests to be honored */
	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
	if (reset_level != HNS3_NONE_RESET)
		hns3_msix_process(hns, reset_level);
}

static const struct eth_dev_ops hns3_eth_dev_ops = {
	.dev_configure = hns3_dev_configure,
	.dev_start = hns3_dev_start,
	.dev_stop = hns3_dev_stop,
	.dev_close = hns3_dev_close,
	.promiscuous_enable = hns3_dev_promiscuous_enable,
	.promiscuous_disable = hns3_dev_promiscuous_disable,
	.allmulticast_enable = hns3_dev_allmulticast_enable,
	.allmulticast_disable = hns3_dev_allmulticast_disable,
	.mtu_set = hns3_dev_mtu_set,
	.stats_get = hns3_stats_get,
	.stats_reset = hns3_stats_reset,
	.xstats_get = hns3_dev_xstats_get,
	.xstats_get_names = hns3_dev_xstats_get_names,
	.xstats_reset = hns3_dev_xstats_reset,
	.xstats_get_by_id = hns3_dev_xstats_get_by_id,
	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
	.dev_infos_get = hns3_dev_infos_get,
	.fw_version_get = hns3_fw_version_get,
	.rx_queue_setup = hns3_rx_queue_setup,
	.tx_queue_setup = hns3_tx_queue_setup,
	.rx_queue_release = hns3_dev_rx_queue_release,
	.tx_queue_release = hns3_dev_tx_queue_release,
	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
	.rxq_info_get = hns3_rxq_info_get,
	.txq_info_get = hns3_txq_info_get,
	.rx_burst_mode_get = hns3_rx_burst_mode_get,
	.tx_burst_mode_get = hns3_tx_burst_mode_get,
	.flow_ctrl_get = hns3_flow_ctrl_get,
	.flow_ctrl_set = hns3_flow_ctrl_set,
	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
	.mac_addr_add = hns3_add_mac_addr,
	.mac_addr_remove = hns3_remove_mac_addr,
	.mac_addr_set = hns3_set_default_mac_addr,
	.set_mc_addr_list = hns3_set_mc_mac_addr_list,
	.link_update = hns3_dev_link_update,
	.rss_hash_update = hns3_dev_rss_hash_update,
	.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
	.reta_update = hns3_dev_rss_reta_update,
	.reta_query = hns3_dev_rss_reta_query,
	.filter_ctrl = hns3_dev_filter_ctrl,
	.vlan_filter_set = hns3_vlan_filter_set,
	.vlan_tpid_set = hns3_vlan_tpid_set,
	.vlan_offload_set = hns3_vlan_offload_set,
	.vlan_pvid_set = hns3_vlan_pvid_set,
	.get_reg = hns3_get_regs,
	.get_dcb_info = hns3_get_dcb_info,
	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
};

static const struct hns3_reset_ops hns3_reset_ops = {
	.reset_service = hns3_reset_service,
	.stop_service = hns3_stop_service,
	.prepare_reset = hns3_prepare_reset,
	.wait_hardware_ready = hns3_wait_hardware_ready,
	.reinit_dev = hns3_reinit_dev,
	.restore_conf = hns3_restore_conf,
	.start_service = hns3_start_service,
};

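/*
 * Per-process device init: secondary processes only attach to the shared
 * state and install the burst functions, while the primary process performs
 * the full PF initialization below.
 */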
static int
hns3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *eth_addr;
	struct hns3_hw *hw = &hns->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->process_private = (struct hns3_process_private *)
	    rte_zmalloc_socket("hns3_filter_list",
			       sizeof(struct hns3_process_private),
			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
	if (eth_dev->process_private == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
		return -ENOMEM;
	}
	/* initialize flow filter lists */
	hns3_filterlist_init(eth_dev);

	hns3_set_rxtx_function(eth_dev);
	eth_dev->dev_ops = &hns3_eth_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ret = hns3_mp_init_secondary();
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to init for secondary "
				     "process, ret = %d", ret);
			goto err_mp_init_secondary;
		}

		hw->secondary_cnt++;
		return 0;
	}

	ret = hns3_mp_init_primary();
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failed to init for primary process, ret = %d",
			     ret);
		goto err_mp_init_primary;
	}

	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
	hns->is_vf = false;
	hw->data = eth_dev->data;

	/*
	 * Set the default max packet size according to the default MTU
	 * value in the DPDK framework.
	 */
	hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;

	ret = hns3_reset_init(hw);
	if (ret)
		goto err_init_reset;
	hw->reset.ops = &hns3_reset_ops;

	ret = hns3_init_pf(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
		goto err_init_pf;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
					       sizeof(struct rte_ether_addr) *
					       HNS3_UC_MACADDR_NUM, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
			     "to store MAC addresses",
			     sizeof(struct rte_ether_addr) *
			     HNS3_UC_MACADDR_NUM);
		ret = -ENOMEM;
		goto err_rte_zmalloc;
	}

	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
		rte_eth_random_addr(hw->mac.mac_addr);
		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				(struct rte_ether_addr *)hw->mac.mac_addr);
		hns3_warn(hw, "default mac_addr from firmware is an invalid "
			  "unicast address, using random MAC address %s",
			  mac_str);
	}
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &eth_dev->data->mac_addrs[0]);

	hw->adapter_state = HNS3_NIC_INITIALIZED;
	/*
	 * Pass the information to rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) {
		hns3_err(hw, "Reschedule reset service after dev_init");
		hns3_schedule_reset(hns);
	} else {
		/* IMP will wait ready flag before reset */
		hns3_notify_reset_ready(hw, false);
	}

	hns3_info(hw, "hns3 dev initialization successful!");
	return 0;

err_rte_zmalloc:
	hns3_uninit_pf(eth_dev);

err_init_pf:
	rte_free(hw->reset.wait_data);

err_init_reset:
	hns3_mp_uninit_primary();

err_mp_init_primary:
err_mp_init_secondary:
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	rte_free(eth_dev->process_private);
	eth_dev->process_private = NULL;
	return ret;
}

static int
hns3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	if (hw->adapter_state < HNS3_NIC_CLOSING)
		hns3_dev_close(eth_dev);

	hw->adapter_state = HNS3_NIC_REMOVED;
	return 0;
}

static int
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct hns3_adapter),
					     hns3_dev_init);
}

static int
eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
}

static const struct rte_pci_id pci_id_hns3_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_hns3_pmd = {
	.id_table = pci_id_hns3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_hns3_pci_probe,
	.remove = eth_hns3_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);