1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
12 #include <rte_bus_pci.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_interrupts.h>
25 #include "hns3_ethdev.h"
26 #include "hns3_logs.h"
27 #include "hns3_rxtx.h"
28 #include "hns3_intr.h"
29 #include "hns3_regs.h"
/* Default port configuration used when the application does not override
 * burst size / queue count.
 */
32 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32
33 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
/* Interval of the periodic service task, in microseconds. */
35 #define HNS3_SERVICE_INTERVAL 1000000 /* us */
/* Port-based VLAN (PVID) states; 0xFFFF marks "no valid PVID".
 * NOTE(review): "INVLID" is a long-standing typo of "INVALID"; renaming
 * would touch users outside this view, so the identifier is kept.
 */
36 #define HNS3_PORT_BASE_VLAN_DISABLE 0
37 #define HNS3_PORT_BASE_VLAN_ENABLE 1
38 #define HNS3_INVLID_PVID 0xFFFF
/* VLAN filter control: filter owner type and per-path enable bits. */
40 #define HNS3_FILTER_TYPE_VF 0
41 #define HNS3_FILTER_TYPE_PORT 1
42 #define HNS3_FILTER_FE_EGRESS_V1_B BIT(0)
43 #define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0)
44 #define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1)
45 #define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2)
46 #define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3)
/* Combined NIC + RoCE masks, one per direction. */
47 #define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \
48 | HNS3_FILTER_FE_ROCE_EGRESS_B)
49 #define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \
50 | HNS3_FILTER_FE_ROCE_INGRESS_B)
/* Log type IDs registered with the DPDK logging framework (init-time
 * and runtime driver messages respectively).
 */
52 int hns3_logtype_init;
53 int hns3_logtype_driver;
/* Vector0 interrupt event causes — the opening of the enum declaration
 * is outside this view.
 */
56 HNS3_VECTOR0_EVENT_RST,
57 HNS3_VECTOR0_EVENT_MBX,
58 HNS3_VECTOR0_EVENT_ERR,
59 HNS3_VECTOR0_EVENT_OTHER,
/* Forward declarations for handlers defined later in this file. */
62 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
63 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
/* Mask the misc (vector0) interrupt by writing 0 to the misc vector
 * interrupt-enable register.
 */
67 hns3_pf_disable_irq0(struct hns3_hw *hw)
69 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
/* Unmask the misc (vector0) interrupt by writing 1 to the misc vector
 * interrupt-enable register.
 */
73 hns3_pf_enable_irq0(struct hns3_hw *hw)
75 hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
/* Classify the pending vector0 interrupt into one event cause.
 * Priority order: IMP reset > global reset > MSI-X error > mailbox >
 * other. On return, *clearval (via the local 'val', assigned outside
 * this view — confirm) carries the bits the caller must write back to
 * acknowledge the event.
 */
78 static enum hns3_evt_cause
79 hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
81 struct hns3_hw *hw = &hns->hw;
82 uint32_t vector0_int_stats;
83 uint32_t cmdq_src_val;
85 enum hns3_evt_cause ret;
87 /* fetch the events from their corresponding regs */
88 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
89 cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
/*
92 * Assumption: If by any chance reset and mailbox events are reported
93 * together then we will only process reset event and defer the
94 * processing of the mailbox events. Since, we would have not cleared
95 * RX CMDQ event this time we would receive again another interrupt
96 * from H/W just for the mailbox.
 */
98 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */
99 val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
100 ret = HNS3_VECTOR0_EVENT_RST;
/* Global reset: handled like IMP reset but with its own ack bit. */
105 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) {
106 val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
107 ret = HNS3_VECTOR0_EVENT_RST;
111 /* check for vector0 msix event source */
112 if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) {
113 val = vector0_int_stats;
114 ret = HNS3_VECTOR0_EVENT_ERR;
118 /* check for vector0 mailbox(=CMDQ RX) event source */
119 if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) {
120 cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
122 ret = HNS3_VECTOR0_EVENT_MBX;
/* Nothing recognized: warn if any stray status bits remain. */
126 if (clearval && (vector0_int_stats || cmdq_src_val))
127 hns3_warn(hw, "surprise irq ector0_int_stats:0x%x cmdq_src_val:0x%x",
128 vector0_int_stats, cmdq_src_val);
129 val = vector0_int_stats;
130 ret = HNS3_VECTOR0_EVENT_OTHER;
/* Acknowledge a vector0 event: reset events are acked via the misc
 * reset status register, mailbox events via the CMDQ source register.
 * Other event types need no explicit clear here.
 */
139 hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
141 if (event_type == HNS3_VECTOR0_EVENT_RST)
142 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr);
143 else if (event_type == HNS3_VECTOR0_EVENT_MBX)
144 hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
/* Clear every latched vector0 event at probe time so stale reset or
 * mailbox interrupts from before driver load are not serviced. Warns
 * if a reset appears to be in flight during probe.
 */
148 hns3_clear_all_event_cause(struct hns3_hw *hw)
150 uint32_t vector0_int_stats;
151 vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
153 if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats)
154 hns3_warn(hw, "Probe during IMP reset interrupt");
156 if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats)
157 hns3_warn(hw, "Probe during Global reset interrupt");
/* Ack all three reset sources and the mailbox source (regclr=0). */
159 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST,
160 BIT(HNS3_VECTOR0_IMPRESET_INT_B) |
161 BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) |
162 BIT(HNS3_VECTOR0_CORERESET_INT_B));
163 hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_MBX, 0);
/* Top-level vector0 interrupt service routine, registered with the
 * rte_intr framework; 'param' is the rte_eth_dev. Disables irq0,
 * classifies and acknowledges the event, then re-enables irq0.
 * Event-specific dispatch (reset/mailbox handling) is elided from
 * this view.
 */
167 hns3_interrupt_handler(void *param)
169 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
170 struct hns3_adapter *hns = dev->data->dev_private;
171 struct hns3_hw *hw = &hns->hw;
172 enum hns3_evt_cause event_cause;
173 uint32_t clearval = 0;
175 /* Disable interrupt */
176 hns3_pf_disable_irq0(hw);
178 event_cause = hns3_check_event_cause(hns, &clearval);
180 hns3_clear_event_cause(hw, event_cause, clearval);
181 /* Enable interrupt if it is not caused by reset */
182 hns3_pf_enable_irq0(hw);
/* Program one VLAN ID into (on != 0) or out of the PF hardware VLAN
 * filter table. The 4096-entry VLAN space is addressed as blocks of
 * 160 IDs, each block a byte-indexed bitmap in the command descriptor.
 */
186 hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on)
188 #define HNS3_VLAN_OFFSET_160 160
189 struct hns3_vlan_filter_pf_cfg_cmd *req;
190 struct hns3_hw *hw = &hns->hw;
191 uint8_t vlan_offset_byte_val;
192 struct hns3_cmd_desc desc;
193 uint8_t vlan_offset_byte;
194 uint8_t vlan_offset_160;
197 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false);
/* Decompose vlan_id into 160-ID block, byte within block, and bit. */
199 vlan_offset_160 = vlan_id / HNS3_VLAN_OFFSET_160;
200 vlan_offset_byte = (vlan_id % HNS3_VLAN_OFFSET_160) / 8;
201 vlan_offset_byte_val = 1 << (vlan_id % 8);
203 req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data;
204 req->vlan_offset = vlan_offset_160;
/* vlan_cfg: 0 = add the entry, 1 = remove it. */
205 req->vlan_cfg = on ? 0 : 1;
206 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
208 ret = hns3_cmd_send(hw, &desc, 1);
210 hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d",
/* Remove vlan_id from the driver's soft VLAN list; if that entry had
 * been written to the hardware table, clear it there too. Frees the
 * list node.
 */
217 hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id)
219 struct hns3_user_vlan_table *vlan_entry;
220 struct hns3_pf *pf = &hns->pf;
222 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
223 if (vlan_entry->vlan_id == vlan_id) {
224 if (vlan_entry->hd_tbl_status)
225 hns3_set_port_vlan_filter(hns, vlan_id, 0);
226 LIST_REMOVE(vlan_entry, next);
227 rte_free(vlan_entry);
/* Append vlan_id to the driver's soft VLAN list, recording whether it
 * has already been written to the hardware filter table
 * (writen_to_tbl — parameter name, spelling kept to match callers).
 */
234 hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
237 struct hns3_user_vlan_table *vlan_entry;
238 struct hns3_hw *hw = &hns->hw;
239 struct hns3_pf *pf = &hns->pf;
241 vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0);
242 if (vlan_entry == NULL) {
243 hns3_err(hw, "Failed to malloc hns3 vlan table");
247 vlan_entry->hd_tbl_status = writen_to_tbl;
248 vlan_entry->vlan_id = vlan_id;
250 LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next);
/* Add (on != 0) or remove a VLAN from the filter. Hardware is only
 * touched while port-based VLAN is disabled; otherwise only the soft
 * list is updated and the hardware table is synced later when PVID is
 * turned off.
 */
254 hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
256 struct hns3_pf *pf = &hns->pf;
257 bool writen_to_tbl = false;
/*
261 * When vlan filter is enabled, hardware regards vlan id 0 as the entry
262 * for normal packet, deleting vlan id 0 is not allowed.
 */
264 if (on == 0 && vlan_id == 0)
/*
268 * When port base vlan enabled, we use port base vlan as the vlan
269 * filter condition. In this case, we don't update vlan filter table
270 * when user add new vlan or remove exist vlan, just update the
271 * vlan list. The vlan id in vlan list will be written in vlan filter
272 * table until port base vlan disabled
 */
274 if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
275 ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
276 writen_to_tbl = true;
/* VLAN 0 is never tracked in the soft list. */
279 if (ret == 0 && vlan_id) {
281 hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
283 hns3_rm_dev_vlan_table(hns, vlan_id);
/* ethdev .vlan_filter_set callback: thin locked wrapper around
 * hns3_vlan_filter_configure().
 */
289 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
291 struct hns3_adapter *hns = dev->data->dev_private;
292 struct hns3_hw *hw = &hns->hw;
295 rte_spinlock_lock(&hw->lock);
296 ret = hns3_vlan_filter_configure(hns, vlan_id, on);
297 rte_spinlock_unlock(&hw->lock);
/* Program the RX and TX VLAN protocol type (TPID) registers. Only
 * 0x8100 (RTE_ETHER_TYPE_VLAN) is accepted; the vlan_type selects
 * which RX fields are written.
 */
302 hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type,
305 struct hns3_rx_vlan_type_cfg_cmd *rx_req;
306 struct hns3_tx_vlan_type_cfg_cmd *tx_req;
307 struct hns3_hw *hw = &hns->hw;
308 struct hns3_cmd_desc desc;
311 if ((vlan_type != ETH_VLAN_TYPE_INNER &&
312 vlan_type != ETH_VLAN_TYPE_OUTER)) {
313 hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type);
317 if (tpid != RTE_ETHER_TYPE_VLAN) {
318 hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type);
322 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false);
323 rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data;
325 if (vlan_type == ETH_VLAN_TYPE_OUTER) {
326 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
327 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
/* NOTE(review): the INNER branch also rewrites the outer types with
 * the same tpid — presumably intentional so all four match; confirm
 * against the hardware command spec.
 */
328 } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
329 rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid);
330 rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid);
331 rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid);
332 rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid);
335 ret = hns3_cmd_send(hw, &desc, 1);
337 hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d",
/* TX side: a second command configures outer/inner insert types. */
342 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false);
344 tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data;
345 tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid);
346 tx_req->in_vlan_type = rte_cpu_to_le_16(tpid);
348 ret = hns3_cmd_send(hw, &desc, 1);
350 hns3_err(hw, "Send txvlan protocol type command fail, ret =%d",
/* ethdev .vlan_tpid_set callback: thin locked wrapper around
 * hns3_vlan_tpid_configure().
 */
356 hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
359 struct hns3_adapter *hns = dev->data->dev_private;
360 struct hns3_hw *hw = &hns->hw;
363 rte_spinlock_lock(&hw->lock);
364 ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid);
365 rte_spinlock_unlock(&hw->lock);
/* Push an RX VLAN tag-strip/show configuration to hardware for the PF
 * vport (vport_id 0; declared outside this view — confirm).
 */
370 hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
371 struct hns3_rx_vtag_cfg *vcfg)
373 struct hns3_vport_vtag_rx_cfg_cmd *req;
374 struct hns3_hw *hw = &hns->hw;
375 struct hns3_cmd_desc desc;
380 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false);
382 req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data;
383 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B,
384 vcfg->strip_tag1_en ? 1 : 0);
385 hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B,
386 vcfg->strip_tag2_en ? 1 : 0);
387 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B,
388 vcfg->vlan1_vlan_prionly ? 1 : 0);
389 hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
390 vcfg->vlan2_vlan_prionly ? 1 : 0);
/*
393 * In current version VF is not supported when PF is driven by DPDK
394 * driver, the PF-related vf_id is 0, just need to configure parameters
 */
398 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
399 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
400 req->vf_bitmap[req->vf_offset] = bitmap;
402 ret = hns3_cmd_send(hw, &desc, 1);
404 hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret);
/* Cache the last RX vtag configuration in the PF so it can be
 * restored after reset.
 */
409 hns3_update_rx_offload_cfg(struct hns3_adapter *hns,
410 struct hns3_rx_vtag_cfg *vcfg)
412 struct hns3_pf *pf = &hns->pf;
413 memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg));
/* Cache the last TX vtag configuration in the PF so it can be
 * restored after reset.
 */
417 hns3_update_tx_offload_cfg(struct hns3_adapter *hns,
418 struct hns3_tx_vtag_cfg *vcfg)
420 struct hns3_pf *pf = &hns->pf;
421 memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg));
/* Enable/disable hardware RX VLAN stripping. Which tag is stripped
 * depends on the port-based VLAN state: with PVID disabled only tag2
 * follows 'enable'; with PVID enabled tag1 follows 'enable' and tag2
 * is always stripped. Caches the config on success.
 */
425 hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable)
427 struct hns3_rx_vtag_cfg rxvlan_cfg;
428 struct hns3_pf *pf = &hns->pf;
429 struct hns3_hw *hw = &hns->hw;
432 if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
433 rxvlan_cfg.strip_tag1_en = false;
434 rxvlan_cfg.strip_tag2_en = enable;
436 rxvlan_cfg.strip_tag1_en = enable;
437 rxvlan_cfg.strip_tag2_en = true;
440 rxvlan_cfg.vlan1_vlan_prionly = false;
441 rxvlan_cfg.vlan2_vlan_prionly = false;
442 rxvlan_cfg.rx_vlan_offload_en = enable;
444 ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg);
446 hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret);
450 hns3_update_rx_offload_cfg(hns, &rxvlan_cfg);
/* Send the VLAN-filter control command: enable (fe_type bits) or
 * disable (0) filtering for the given owner type / vf_id.
 */
456 hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type,
457 uint8_t fe_type, bool filter_en, uint8_t vf_id)
459 struct hns3_vlan_filter_ctrl_cmd *req;
460 struct hns3_cmd_desc desc;
463 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false);
465 req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data;
466 req->vlan_type = vlan_type;
467 req->vlan_fe = filter_en ? fe_type : 0;
470 ret = hns3_cmd_send(hw, &desc, 1);
472 hns3_err(hw, "set vlan filter fail, ret =%d", ret);
/* Configure the two VLAN filter stages: the VF egress filter is
 * forced off (hard-coded false — looks intentional for the PF-only
 * driver; confirm), while the port ingress filter follows 'enable'.
 */
478 hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable)
480 struct hns3_hw *hw = &hns->hw;
483 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF,
484 HNS3_FILTER_FE_EGRESS, false, 0);
486 hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
490 ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT,
491 HNS3_FILTER_FE_INGRESS, enable, 0);
493 hns3_err(hw, "hns3 enable filter fail, ret =%d", ret);
/* ethdev .vlan_offload_set callback. Only the STRIP bit of 'mask' is
 * handled in this view; the strip state is derived from the current
 * rxmode offload flags under hw->lock.
 */
499 hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
501 struct hns3_adapter *hns = dev->data->dev_private;
502 struct hns3_hw *hw = &hns->hw;
503 struct rte_eth_rxmode *rxmode;
504 unsigned int tmp_mask;
508 rte_spinlock_lock(&hw->lock);
509 rxmode = &dev->data->dev_conf.rxmode;
510 tmp_mask = (unsigned int)mask;
511 if (tmp_mask & ETH_VLAN_STRIP_MASK) {
512 /* Enable or disable VLAN stripping */
513 enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ?
516 ret = hns3_en_hw_strip_rxvtag(hns, enable);
/* Unlock before returning the error so the lock is never leaked. */
518 rte_spinlock_unlock(&hw->lock);
519 hns3_err(hw, "failed to enable rx strip, ret =%d", ret);
524 rte_spinlock_unlock(&hw->lock);
/* Push a TX VLAN tag-insert/accept configuration to hardware for the
 * PF vport (vport_id 0; declared outside this view — confirm).
 */
530 hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
531 struct hns3_tx_vtag_cfg *vcfg)
533 struct hns3_vport_vtag_tx_cfg_cmd *req;
534 struct hns3_cmd_desc desc;
535 struct hns3_hw *hw = &hns->hw;
540 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false);
542 req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data;
543 req->def_vlan_tag1 = vcfg->default_tag1;
544 req->def_vlan_tag2 = vcfg->default_tag2;
545 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B,
546 vcfg->accept_tag1 ? 1 : 0);
547 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B,
548 vcfg->accept_untag1 ? 1 : 0);
549 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B,
550 vcfg->accept_tag2 ? 1 : 0);
551 hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B,
552 vcfg->accept_untag2 ? 1 : 0);
553 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B,
554 vcfg->insert_tag1_en ? 1 : 0);
555 hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B,
556 vcfg->insert_tag2_en ? 1 : 0);
/* 0 selects the NIC (not RoCE) path for this config. */
557 hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
/*
560 * In current version VF is not supported when PF is driven by DPDK
561 * driver, the PF-related vf_id is 0, just need to configure parameters
 */
565 req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD;
566 bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE);
567 req->vf_bitmap[req->vf_offset] = bitmap;
569 ret = hns3_cmd_send(hw, &desc, 1);
571 hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret);
/* Build and apply the TX vtag config for a PVID change: with PVID
 * disabled, tagged frames pass through untouched; with PVID enabled,
 * hardware inserts 'pvid' as tag1 and tagged-tag1 frames are not
 * accepted. Caches the config on success.
 */
577 hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state,
580 struct hns3_hw *hw = &hns->hw;
581 struct hns3_tx_vtag_cfg txvlan_cfg;
584 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) {
585 txvlan_cfg.accept_tag1 = true;
586 txvlan_cfg.insert_tag1_en = false;
587 txvlan_cfg.default_tag1 = 0;
589 txvlan_cfg.accept_tag1 = false;
590 txvlan_cfg.insert_tag1_en = true;
591 txvlan_cfg.default_tag1 = pvid;
/* Tag2 behavior is identical in both states. */
594 txvlan_cfg.accept_untag1 = true;
595 txvlan_cfg.accept_tag2 = true;
596 txvlan_cfg.accept_untag2 = true;
597 txvlan_cfg.insert_tag2_en = false;
598 txvlan_cfg.default_tag2 = 0;
600 ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg);
602 hns3_err(hw, "pf vlan set pvid failed, pvid =%u ,ret =%d", pvid,
607 hns3_update_tx_offload_cfg(hns, &txvlan_cfg);
/* Record the current port-based VLAN state and PVID in the PF
 * structure (software bookkeeping only; no hardware access).
 */
612 hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on)
614 struct hns3_pf *pf = &hns->pf;
616 pf->port_base_vlan_cfg.state = on ?
617 HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
619 pf->port_base_vlan_cfg.pvid = pvid;
/* Clear every hardware-written VLAN entry tracked in the soft list.
 * When is_del_list is true the soft list nodes are freed as well;
 * otherwise only hd_tbl_status is reset so the entries can be
 * re-written later.
 */
623 hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list)
625 struct hns3_user_vlan_table *vlan_entry;
626 struct hns3_pf *pf = &hns->pf;
628 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
629 if (vlan_entry->hd_tbl_status)
630 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0);
632 vlan_entry->hd_tbl_status = false;
/* Optional list teardown: pop-and-free until the list is empty. */
636 vlan_entry = LIST_FIRST(&pf->vlan_list);
638 LIST_REMOVE(vlan_entry, next);
639 rte_free(vlan_entry);
640 vlan_entry = LIST_FIRST(&pf->vlan_list);
/* Write every soft-list VLAN that is not yet in hardware back into
 * the hardware filter table (used when PVID is disabled again).
 */
646 hns3_add_all_vlan_table(struct hns3_adapter *hns)
648 struct hns3_user_vlan_table *vlan_entry;
649 struct hns3_pf *pf = &hns->pf;
651 LIST_FOREACH(vlan_entry, &pf->vlan_list, next) {
652 if (!vlan_entry->hd_tbl_status)
653 hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1);
655 vlan_entry->hd_tbl_status = true;
/* Teardown path: free the whole soft VLAN list, clear its hardware
 * entries, and also remove the PVID filter entry if one is set.
 */
660 hns3_remove_all_vlan_table(struct hns3_adapter *hns)
662 struct hns3_hw *hw = &hns->hw;
663 struct hns3_pf *pf = &hns->pf;
666 hns3_rm_all_vlan_table(hns, true);
667 if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) {
668 ret = hns3_set_port_vlan_filter(hns,
669 pf->port_base_vlan_cfg.pvid, 0);
671 hns3_err(hw, "Failed to remove all vlan table, ret =%d",
/* Reconcile the hardware VLAN filter table with a PVID transition.
 * Enabling PVID: drop the old PVID entry, clear all soft-list entries
 * from hardware, install only the new PVID. Disabling: remove the
 * PVID entry and, when the PVID is unchanged, restore the soft-list
 * entries.
 */
679 hns3_update_vlan_filter_entries(struct hns3_adapter *hns,
680 uint16_t port_base_vlan_state,
681 uint16_t new_pvid, uint16_t old_pvid)
683 struct hns3_pf *pf = &hns->pf;
684 struct hns3_hw *hw = &hns->hw;
687 if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) {
688 if (old_pvid != HNS3_INVLID_PVID && old_pvid != 0) {
689 ret = hns3_set_port_vlan_filter(hns, old_pvid, 0);
692 "Failed to clear clear old pvid filter, ret =%d",
698 hns3_rm_all_vlan_table(hns, false);
699 return hns3_set_port_vlan_filter(hns, new_pvid, 1);
/* PVID being disabled: drop the filter entry for new_pvid. */
703 ret = hns3_set_port_vlan_filter(hns, new_pvid, 0);
705 hns3_err(hw, "Failed to set port vlan filter, ret =%d",
711 if (new_pvid == pf->port_base_vlan_cfg.pvid)
712 hns3_add_all_vlan_table(hns);
/* Enable/disable stripping of both RX VLAN tags at once (used on PVID
 * transitions). Caches the config on success.
 */
718 hns3_en_rx_strip_all(struct hns3_adapter *hns, int on)
720 struct hns3_rx_vtag_cfg rx_vlan_cfg;
721 struct hns3_hw *hw = &hns->hw;
725 rx_strip_en = on ? true : false;
726 rx_vlan_cfg.strip_tag1_en = rx_strip_en;
727 rx_vlan_cfg.strip_tag2_en = rx_strip_en;
728 rx_vlan_cfg.vlan1_vlan_prionly = false;
729 rx_vlan_cfg.vlan2_vlan_prionly = false;
730 rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en;
732 ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg);
734 hns3_err(hw, "enable strip rx failed, ret =%d", ret);
738 hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg);
/* Apply a PVID change: validate the request, reprogram TX insert and
 * RX strip behavior, reconcile the filter table, and finally store
 * the new state. HNS3_INVLID_PVID is used at init/reset to mean
 * "clear PVID without touching filter entries".
 */
743 hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on)
745 struct hns3_pf *pf = &hns->pf;
746 struct hns3_hw *hw = &hns->hw;
747 uint16_t port_base_vlan_state;
/* Disabling a PVID other than the currently-set one is rejected. */
751 if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) {
752 if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID)
753 hns3_warn(hw, "Invalid operation! As current pvid set "
754 "is %u, disable pvid %u is invalid",
755 pf->port_base_vlan_cfg.pvid, pvid);
759 port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE :
760 HNS3_PORT_BASE_VLAN_DISABLE;
761 ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid);
763 hns3_err(hw, "Failed to config tx vlan, ret =%d", ret);
767 ret = hns3_en_rx_strip_all(hns, on);
769 hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret);
/* Sentinel PVID: skip filter-table reconciliation entirely. */
773 if (pvid == HNS3_INVLID_PVID)
775 old_pvid = pf->port_base_vlan_cfg.pvid;
776 ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid,
779 hns3_err(hw, "Failed to update vlan filter entries, ret =%d",
785 hns3_store_port_base_vlan_info(hns, pvid, on);
/* ethdev .vlan_pvid_set callback: thin locked wrapper around
 * hns3_vlan_pvid_configure().
 */
790 hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
792 struct hns3_adapter *hns = dev->data->dev_private;
793 struct hns3_hw *hw = &hns->hw;
796 rte_spinlock_lock(&hw->lock);
797 ret = hns3_vlan_pvid_configure(hns, pvid, on);
798 rte_spinlock_unlock(&hw->lock);
/* Initialize the software PVID state to "disabled / no PVID". */
803 init_port_base_vlan_info(struct hns3_hw *hw)
805 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
806 struct hns3_pf *pf = &hns->pf;
808 pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
809 pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID;
/* Install the VLAN-0 filter entry that hardware requires for normal
 * (untagged) traffic to pass when filtering is enabled.
 */
813 hns3_default_vlan_config(struct hns3_adapter *hns)
815 struct hns3_hw *hw = &hns->hw;
818 ret = hns3_set_port_vlan_filter(hns, 0, 1);
820 hns3_err(hw, "default vlan 0 config failed, ret =%d", ret);
/* Full VLAN subsystem initialization: filter enable, TPID setup, and
 * (outside the reset path) PVID/strip defaults plus the VLAN-0 entry.
 */
825 hns3_init_vlan_config(struct hns3_adapter *hns)
827 struct hns3_hw *hw = &hns->hw;
/*
831 * This function can be called in the initialization and reset process,
832 * when in reset process, it means that hardware had been reset
833 * successfully and we need to restore the hardware configuration to
834 * ensure that the hardware configuration remains unchanged before and
 */
837 if (rte_atomic16_read(&hw->reset.resetting) == 0)
838 init_port_base_vlan_info(hw);
840 ret = hns3_enable_vlan_filter(hns, true);
842 hns3_err(hw, "vlan init fail in pf, ret =%d", ret);
846 ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER,
847 RTE_ETHER_TYPE_VLAN);
849 hns3_err(hw, "tpid set fail in pf, ret =%d", ret);
/*
854 * When in the reinit dev stage of the reset process, the following
855 * vlan-related configurations may differ from those at initialization,
856 * we will restore configurations to hardware in hns3_restore_vlan_table
857 * and hns3_restore_vlan_conf later.
 */
859 if (rte_atomic16_read(&hw->reset.resetting) == 0) {
860 ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0);
862 hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
866 ret = hns3_en_hw_strip_rxvtag(hns, false);
868 hns3_err(hw, "rx strip configure fail in pf, ret =%d",
874 return hns3_default_vlan_config(hns);
/* Apply the application's dev_conf VLAN settings at configure time:
 * warn about unsupported reject flags, apply strip offload, then the
 * requested PVID.
 */
878 hns3_dev_configure_vlan(struct rte_eth_dev *dev)
880 struct hns3_adapter *hns = dev->data->dev_private;
881 struct rte_eth_dev_data *data = dev->data;
882 struct rte_eth_txmode *txmode;
883 struct hns3_hw *hw = &hns->hw;
886 txmode = &data->dev_conf.txmode;
887 if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged)
889 "hw_vlan_reject_tagged or hw_vlan_reject_untagged "
890 "configuration is not supported! Ignore these two "
891 "parameters: hw_vlan_reject_tagged(%d), "
892 "hw_vlan_reject_untagged(%d)",
893 txmode->hw_vlan_reject_tagged,
894 txmode->hw_vlan_reject_untagged);
896 /* Apply vlan offload setting */
897 ret = hns3_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
899 hns3_err(hw, "dev config vlan Strip failed, ret =%d", ret);
903 /* Apply pvid setting */
904 ret = hns3_vlan_pvid_set(dev, txmode->pvid,
905 txmode->hw_vlan_insert_pvid);
907 hns3_err(hw, "dev config vlan pvid(%d) failed, ret =%d",
/* Configure the hardware TSO MSS minimum and maximum. Each value is
 * packed into its own 16-bit word of the command.
 * NOTE(review): the max value is also packed with the
 * HNS3_TSO_MSS_MIN_M/_S field macros — verify against the register
 * layout that min and max share the same in-word field position.
 */
914 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
915 unsigned int tso_mss_max)
917 struct hns3_cfg_tso_status_cmd *req;
918 struct hns3_cmd_desc desc;
921 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
923 req = (struct hns3_cfg_tso_status_cmd *)desc.data;
926 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
928 req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
931 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
933 req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
935 return hns3_cmd_send(hw, &desc, 1);
/* Enable or disable hardware GRO via a single config command. */
939 hns3_config_gro(struct hns3_hw *hw, bool en)
941 struct hns3_cfg_gro_status_cmd *req;
942 struct hns3_cmd_desc desc;
945 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
946 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
948 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
950 ret = hns3_cmd_send(hw, &desc, 1);
952 hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret);
/* Allocate (is_alloc) or free unicast MAC-VLAN (UMV) table space from
 * firmware. On allocation, *allocated_size (if non-NULL) receives the
 * size firmware actually granted.
 */
958 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
959 uint16_t *allocated_size, bool is_alloc)
961 struct hns3_umv_spc_alc_cmd *req;
962 struct hns3_cmd_desc desc;
965 req = (struct hns3_umv_spc_alc_cmd *)desc.data;
966 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
/* Bit semantics: 0 = allocate, 1 = free. */
967 hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
968 req->space_size = rte_cpu_to_le_32(space_size);
970 ret = hns3_cmd_send(hw, &desc, 1);
972 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
973 is_alloc ? "allocate" : "free", ret);
977 if (is_alloc && allocated_size)
978 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
/* Request the wanted UMV space from firmware and record what was
 * actually granted; falls back to the wanted size if firmware reports
 * zero (best-effort — see warning path).
 */
986 hns3_init_umv_space(struct hns3_hw *hw)
987 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
987 struct hns3_pf *pf = &hns->pf;
988 uint16_t allocated_size = 0;
991 ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
996 if (allocated_size < pf->wanted_umv_size)
997 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
998 pf->wanted_umv_size, allocated_size);
1000 pf->max_umv_size = (!!allocated_size) ? allocated_size :
1001 pf->wanted_umv_size;
1002 pf->used_umv_size = 0;
/* Return the allocated UMV space to firmware and reset the recorded
 * maximum. No-op if nothing was allocated.
 */
1009 hns3_uninit_umv_space(struct hns3_hw *hw)
1009 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1010 struct hns3_pf *pf = &hns->pf;
1013 if (pf->max_umv_size == 0)
1016 ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
1020 pf->max_umv_size = 0;
/* True when the unicast MAC table has no free UMV slots left. */
1026 hns3_is_umv_space_full(struct hns3_hw *hw)
1028 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1029 struct hns3_pf *pf = &hns->pf;
1032 is_full = (pf->used_umv_size >= pf->max_umv_size);
/* Adjust the UMV usage counter: decrement on free (never below zero),
 * increment on allocate.
 */
1038 hns3_update_umv_space(struct hns3_hw *hw, bool is_free)
1040 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1041 struct hns3_pf *pf = &hns->pf;
1044 if (pf->used_umv_size > 0)
1045 pf->used_umv_size--;
1047 pf->used_umv_size++;
/* Fill a MAC-VLAN table entry command from a 6-byte MAC address.
 * Bytes [0..3] form the little-endian high 32 bits and [4..5] the low
 * 16 bits. The multicast flags are set in the is_mc path (the branch
 * itself is outside this view — confirm which lines it guards).
 */
1051 hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req,
1052 const uint8_t *addr, bool is_mc)
1054 const unsigned char *mac_addr = addr;
1055 uint32_t high_val = ((uint32_t)mac_addr[3] << 24) |
1056 ((uint32_t)mac_addr[2] << 16) |
1057 ((uint32_t)mac_addr[1] << 8) |
1058 (uint32_t)mac_addr[0];
1059 uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4];
1061 hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1063 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1064 hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1);
1065 hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1);
1068 new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val);
1069 new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff);
/* Translate a MAC-VLAN command completion (cmdq_resp + per-op
 * resp_code) into a driver return code, logging the failure reason.
 * Return values for each branch are elided from this view.
 */
1073 hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp,
1075 enum hns3_mac_vlan_tbl_opcode op)
/* Non-zero cmdq_resp means the command itself failed. */
1078 hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u",
1083 if (op == HNS3_MAC_VLAN_ADD) {
1084 if (resp_code == 0 || resp_code == 1) {
1086 } else if (resp_code == HNS3_ADD_UC_OVERFLOW) {
1087 hns3_err(hw, "add mac addr failed for uc_overflow");
1089 } else if (resp_code == HNS3_ADD_MC_OVERFLOW) {
1090 hns3_err(hw, "add mac addr failed for mc_overflow");
1094 hns3_err(hw, "add mac addr failed for undefined, code=%u",
1097 } else if (op == HNS3_MAC_VLAN_REMOVE) {
1098 if (resp_code == 0) {
1100 } else if (resp_code == 1) {
1101 hns3_dbg(hw, "remove mac addr failed for miss");
1105 hns3_err(hw, "remove mac addr failed for undefined, code=%u",
1108 } else if (op == HNS3_MAC_VLAN_LKUP) {
1109 if (resp_code == 0) {
1111 } else if (resp_code == 1) {
1112 hns3_dbg(hw, "lookup mac addr failed for miss");
1116 hns3_err(hw, "lookup mac addr failed for undefined, code=%u",
1121 hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u",
/* Look up an entry in the MAC-VLAN table. Multicast lookups use a
 * three-descriptor chained command; unicast a single descriptor. The
 * hardware response code is decoded by hns3_get_mac_vlan_cmd_status().
 */
1128 hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw,
1129 struct hns3_mac_vlan_tbl_entry_cmd *req,
1130 struct hns3_cmd_desc *desc, bool is_mc)
1136 hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true);
1138 desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1139 memcpy(desc[0].data, req,
1140 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1141 hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD,
1143 desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1144 hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD,
1146 ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
/* Unicast path: single descriptor. */
1148 memcpy(desc[0].data, req,
1149 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1150 ret = hns3_cmd_send(hw, desc, 1);
1153 hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.",
/* resp_code lives in byte 1 of the first data word. */
1157 resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff;
1158 retval = rte_le_to_cpu_16(desc[0].retval);
1160 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1161 HNS3_MAC_VLAN_LKUP);
/* Add an entry to the MAC-VLAN table. mc_desc == NULL selects the
 * single-descriptor unicast add; otherwise the three mc descriptors
 * (typically pre-filled by a prior lookup) are reused for a chained
 * multicast add.
 */
1165 hns3_add_mac_vlan_tbl(struct hns3_hw *hw,
1166 struct hns3_mac_vlan_tbl_entry_cmd *req,
1167 struct hns3_cmd_desc *mc_desc)
1174 if (mc_desc == NULL) {
1175 struct hns3_cmd_desc desc;
1177 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false);
1178 memcpy(desc.data, req,
1179 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1180 ret = hns3_cmd_send(hw, &desc, 1);
1181 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1182 retval = rte_le_to_cpu_16(desc.retval);
1184 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
/* Multicast path: re-arm the three descriptors as a write chain. */
1187 hns3_cmd_reuse_desc(&mc_desc[0], false);
1188 mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1189 hns3_cmd_reuse_desc(&mc_desc[1], false);
1190 mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1191 hns3_cmd_reuse_desc(&mc_desc[2], false);
1192 mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT);
1193 memcpy(mc_desc[0].data, req,
1194 sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1195 mc_desc[0].retval = 0;
1196 ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM);
1197 resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff;
1198 retval = rte_le_to_cpu_16(mc_desc[0].retval);
1200 cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1205 hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret);
/* Remove an entry from the MAC-VLAN table with a single-descriptor
 * command; the hardware response code is decoded by
 * hns3_get_mac_vlan_cmd_status().
 */
1213 hns3_remove_mac_vlan_tbl(struct hns3_hw *hw,
1214 struct hns3_mac_vlan_tbl_entry_cmd *req)
1216 struct hns3_cmd_desc desc;
1221 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false);
1223 memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd));
1225 ret = hns3_cmd_send(hw, &desc, 1);
1227 hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret);
1230 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1231 retval = rte_le_to_cpu_16(desc.retval);
1233 return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code,
1234 HNS3_MAC_VLAN_REMOVE);
/* Add a unicast MAC address for the PF: validate it, look it up in
 * the MAC-VLAN table, and add it only if absent and UMV space remains
 * (duplicates are tolerated, not re-added).
 */
1238 hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1240 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1241 struct hns3_mac_vlan_tbl_entry_cmd req;
1242 struct hns3_pf *pf = &hns->pf;
1243 struct hns3_cmd_desc desc;
1244 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1245 uint16_t egress_port = 0;
1249 /* check if mac addr is valid */
1250 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1251 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1253 hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid",
1258 memset(&req, 0, sizeof(req));
/*
1261 * In current version VF is not supported when PF is driven by DPDK
1262 * driver, the PF-related vf_id is 0, just need to configure parameters
 */
1266 hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M,
1267 HNS3_MAC_EPORT_VFID_S, vf_id);
1269 req.egress_port = rte_cpu_to_le_16(egress_port);
1271 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
/*
1274 * Lookup the mac address in the mac_vlan table, and add
1275 * it if the entry is inexistent. Repeated unicast entry
1276 * is not allowed in the mac vlan table.
 */
1278 ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, false);
1279 if (ret == -ENOENT) {
1280 if (!hns3_is_umv_space_full(hw)) {
1281 ret = hns3_add_mac_vlan_tbl(hw, &req, NULL);
/* Successful add consumes one UMV slot. */
1283 hns3_update_umv_space(hw, false);
1287 hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size);
1292 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
1294 /* check if we just hit the duplicate */
1296 hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str);
1300 hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table",
/* ethdev .mac_addr_add callback: locked wrapper around
 * hns3_add_uc_addr_common(); on success marks the default address as
 * set (the idx check guarding that is elided — confirm).
 */
1307 hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1308 uint32_t idx, __attribute__ ((unused)) uint32_t pool)
1310 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1311 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1314 rte_spinlock_lock(&hw->lock);
1315 ret = hns3_add_uc_addr_common(hw, mac_addr);
/* Failure path: drop the lock before logging and returning. */
1317 rte_spinlock_unlock(&hw->lock);
1318 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1320 hns3_err(hw, "Failed to add mac addr(%s): %d", mac_str, ret);
1325 hw->mac.default_addr_setted = true;
1326 rte_spinlock_unlock(&hw->lock);
/*
 * Remove a unicast MAC address from the hardware MAC/VLAN table.
 * Caller holds hw->lock.  -ENOENT from the table (address not present) is
 * tolerated; on a successful removal the used UMV space is given back.
 */
1332 hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1334 struct hns3_mac_vlan_tbl_entry_cmd req;
1335 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1338 /* check if mac addr is valid */
1339 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1340 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1342 hns3_err(hw, "Remove unicast mac addr err! addr(%s) invalid",
/* Build a zeroed table entry keyed by the MAC address (unicast: mc=false). */
1347 memset(&req, 0, sizeof(req));
1348 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1349 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false);
1350 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1351 if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */
/* is_free = true: a UMV slot has been released. */
1354 hns3_update_umv_space(hw, true);
/*
 * .mac_addr_remove ethdev ops callback: delete the unicast address stored
 * at slot @idx of dev->data->mac_addrs.  Hardware access is done under
 * hw->lock.
 */
1360 hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
1362 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1363 /* index will be checked by upper level rte interface */
1364 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
1365 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1368 rte_spinlock_lock(&hw->lock);
1369 ret = hns3_remove_uc_addr_common(hw, mac_addr);
/* Failure path: unlock, then log the address that could not be removed. */
1371 rte_spinlock_unlock(&hw->lock);
1372 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1374 hns3_err(hw, "Failed to remove mac addr(%s): %d", mac_str, ret);
/*
 * Success path: clear the default-address flag.
 * NOTE(review): likely guarded by "removed address == default address"
 * in lines not shown here -- verify against the full source.
 */
1379 hw->mac.default_addr_setted = false;
1380 rte_spinlock_unlock(&hw->lock);
/*
 * .mac_addr_set ethdev ops callback: replace the device default MAC address.
 * Sequence under hw->lock: remove the old default address (best effort),
 * add the new one, then reprogram the MAC pause (flow-control) address.
 * On failure the function rolls back: it deletes the just-added address and
 * tries to restore the old one (see the err_* goto targets).
 */
1384 hns3_set_default_mac_addr(struct rte_eth_dev *dev,
1385 struct rte_ether_addr *mac_addr)
1387 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1388 struct rte_ether_addr *oaddr;
1389 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1390 bool default_addr_setted;
1391 bool rm_succes = false;
1394 /* check if mac addr is valid */
1395 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
1396 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1398 hns3_err(hw, "Failed to set mac addr, addr(%s) invalid",
/* Fast path: new address equals the currently-set default -> nothing to do. */
1403 oaddr = (struct rte_ether_addr *)hw->mac.mac_addr;
1404 default_addr_setted = hw->mac.default_addr_setted;
1405 if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr))
1408 rte_spinlock_lock(&hw->lock);
1409 if (default_addr_setted) {
1410 ret = hns3_remove_uc_addr_common(hw, oaddr);
/* Old-address removal failure is only a warning; continue with the add. */
1412 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1414 hns3_warn(hw, "Remove old uc mac address(%s) fail: %d",
1421 ret = hns3_add_uc_addr_common(hw, mac_addr);
1423 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1425 hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret);
1426 goto err_add_uc_addr;
/* Keep the pause-frame source address in sync with the new default MAC. */
1429 ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes);
1431 hns3_err(hw, "Failed to configure mac pause address: %d", ret);
1432 goto err_pause_addr_cfg;
/* Commit: remember the new default address in the driver state. */
1435 rte_ether_addr_copy(mac_addr,
1436 (struct rte_ether_addr *)hw->mac.mac_addr);
1437 hw->mac.default_addr_setted = true;
1438 rte_spinlock_unlock(&hw->lock);
/* Rollback path: undo the add of the new address ... */
1443 ret_val = hns3_remove_uc_addr_common(hw, mac_addr);
1445 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1448 "Failed to roll back to del setted mac addr(%s): %d",
/* ... and try to restore the old default address. */
1454 ret_val = hns3_add_uc_addr_common(hw, oaddr);
1456 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1459 "Failed to restore old uc mac addr(%s): %d",
1461 hw->mac.default_addr_setted = false;
1464 rte_spinlock_unlock(&hw->lock);
/*
 * Remove (del == true) or restore (del == false) every valid unicast MAC
 * address known to the ethdev layer.  Used around reset/restore flows.
 * Failures are logged per address; invalid/empty slots are skipped.
 */
1470 hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
1472 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1473 struct hns3_hw *hw = &hns->hw;
1474 struct rte_ether_addr *addr;
1479 for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) {
1480 addr = &hw->data->mac_addrs[i];
/* Skip empty/multicast slots: only assigned unicast addresses count. */
1481 if (!rte_is_valid_assigned_ether_addr(addr))
1484 ret = hns3_remove_uc_addr_common(hw, addr);
1486 ret = hns3_add_uc_addr_common(hw, addr);
1489 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1492 "Failed to %s mac addr(%s). ret:%d i:%d",
1493 del ? "remove" : "restore", mac_str, ret, i);
/*
 * Set (clr == false) or clear (clr == true) the bit for function @vfid in
 * the multicast entry's VF bitmap carried in the command descriptors.
 * VFIDs 0..191 live in desc[1].data, the remainder in desc[2].data; each
 * 32-bit data word holds 32 VF bits.
 */
1500 hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr)
1502 #define HNS3_VF_NUM_IN_FIRST_DESC 192
1506 if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) {
1507 word_num = vfid / 32;
1508 bit_num = vfid % 32;
1510 desc[1].data[word_num] &=
1511 rte_cpu_to_le_32(~(1UL << bit_num));
1513 desc[1].data[word_num] |=
1514 rte_cpu_to_le_32(1UL << bit_num);
/* Overflow descriptor: word index is relative to VFID 192. */
1516 word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32;
/* vfid % 32 == (vfid - 192) % 32 since 192 is a multiple of 32. */
1517 bit_num = vfid % 32;
1519 desc[2].data[word_num] &=
1520 rte_cpu_to_le_32(~(1UL << bit_num));
1522 desc[2].data[word_num] |=
1523 rte_cpu_to_le_32(1UL << bit_num);
/*
 * Add a multicast MAC address to the hardware MC MAC/VLAN table.
 * Looks the address up first; if absent, builds a fresh 3-descriptor entry.
 * In this PF-only driver the entry is bound to vf_id 0.
 */
1528 hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1530 struct hns3_mac_vlan_tbl_entry_cmd req;
1531 struct hns3_cmd_desc desc[3];
1532 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1536 /* Check if mac addr is valid */
1537 if (!rte_is_multicast_ether_addr(mac_addr)) {
1538 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1540 hns3_err(hw, "Failed to add mc mac addr, addr(%s) invalid",
1545 memset(&req, 0, sizeof(req));
1546 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1547 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1548 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
1550 /* This mac addr do not exist, add new entry for it */
1551 memset(desc[0].data, 0, sizeof(desc[0].data));
1552 memset(desc[1].data, 0, sizeof(desc[0].data));
1553 memset(desc[2].data, 0, sizeof(desc[0].data));
1557 * In current version VF is not supported when PF is driven by DPDK
1558 * driver, the PF-related vf_id is 0, just need to configure parameters
/* Mark vf_id 0 in the entry's VF bitmap, then write the entry. */
1562 hns3_update_desc_vfid(desc, vf_id, false);
1563 ret = hns3_add_mac_vlan_tbl(hw, &req, desc);
1566 hns3_err(hw, "mc mac vlan table is full");
1567 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1569 hns3_err(hw, "Failed to add mc mac addr(%s): %d", mac_str, ret);
/*
 * Remove a multicast MAC address from the hardware MC MAC/VLAN table.
 * Clears this function's bit (vf_id 0) from the entry's VF bitmap; when the
 * bitmap becomes empty the whole entry is deleted.  A missing entry
 * (-ENOENT) is not treated as an error.
 */
1576 hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
1578 struct hns3_mac_vlan_tbl_entry_cmd req;
1579 struct hns3_cmd_desc desc[3];
1580 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1584 /* Check if mac addr is valid */
1585 if (!rte_is_multicast_ether_addr(mac_addr)) {
1586 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1588 hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid",
1593 memset(&req, 0, sizeof(req));
1594 hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0);
1595 hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true);
1596 ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true);
1599 * This mac addr exist, remove this handle's VFID for it.
1600 * In current version VF is not supported when PF is driven by
1601 * DPDK driver, the PF-related vf_id is 0, just need to
1602 * configure parameters for vf_id 0.
/* clr == true: drop vf_id 0 from the entry's VF bitmap. */
1605 hns3_update_desc_vfid(desc, vf_id, true);
1607 /* All the vfid is zero, so need to delete this entry */
1608 ret = hns3_remove_mac_vlan_tbl(hw, &req);
1609 } else if (ret == -ENOENT) {
1610 /* This mac addr doesn't exist. */
1615 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1617 hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret);
/*
 * Validate the user-supplied multicast address list:
 *  - count must not exceed HNS3_MC_MACADDR_NUM,
 *  - every entry must be a multicast address,
 *  - no duplicates (O(n^2) pairwise scan; n is small and bounded).
 */
1624 hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
1625 struct rte_ether_addr *mc_addr_set,
1626 uint32_t nb_mc_addr)
1628 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1629 struct rte_ether_addr *addr;
1633 if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
1634 hns3_err(hw, "Failed to set mc mac addr, nb_mc_addr(%d) "
1635 "invalid. valid range: 0~%d",
1636 nb_mc_addr, HNS3_MC_MACADDR_NUM);
1640 /* Check if input mac addresses are valid */
1641 for (i = 0; i < nb_mc_addr; i++) {
1642 addr = &mc_addr_set[i];
1643 if (!rte_is_multicast_ether_addr(addr)) {
1644 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1647 "Failed to set mc mac addr, addr(%s) invalid.",
1652 /* Check if there are duplicate addresses */
1653 for (j = i + 1; j < nb_mc_addr; j++) {
1654 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
1655 rte_ether_format_addr(mac_str,
1656 RTE_ETHER_ADDR_FMT_SIZE,
1658 hns3_err(hw, "Failed to set mc mac addr, "
1659 "addrs invalid. two same addrs(%s).",
/*
 * Diff the requested multicast list against the list currently kept in
 * hw->mc_addrs and produce three output lists:
 *  - reserved: addresses present in both (kept as-is),
 *  - rm:       addresses only in the current list (to be removed),
 *  - add:      addresses only in the requested list (to be added).
 * Finally reorders hw->mc_addrs to [reserved..., rm...] so the caller can
 * overwrite the tail as addresses are removed/added.
 */
1670 hns3_set_mc_addr_calc_addr(struct hns3_hw *hw,
1671 struct rte_ether_addr *mc_addr_set,
1673 struct rte_ether_addr *reserved_addr_list,
1674 int *reserved_addr_num,
1675 struct rte_ether_addr *add_addr_list,
1677 struct rte_ether_addr *rm_addr_list,
1680 struct rte_ether_addr *addr;
1681 int current_addr_num;
1682 int reserved_num = 0;
1690 /* Calculate the mc mac address list that should be removed */
1691 current_addr_num = hw->mc_addrs_num;
1692 for (i = 0; i < current_addr_num; i++) {
1693 addr = &hw->mc_addrs[i];
1695 for (j = 0; j < mc_addr_num; j++) {
1696 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
/* Not found in the new list -> schedule for removal; found -> keep. */
1703 rte_ether_addr_copy(addr, &rm_addr_list[rm_num]);
1706 rte_ether_addr_copy(addr,
1707 &reserved_addr_list[reserved_num]);
1712 /* Calculate the mc mac address list that should be added */
1713 for (i = 0; i < mc_addr_num; i++) {
1714 addr = &mc_addr_set[i];
1716 for (j = 0; j < current_addr_num; j++) {
1717 if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) {
/* Not present yet -> schedule for addition. */
1724 rte_ether_addr_copy(addr, &add_addr_list[add_num]);
1729 /* Reorder the mc mac address list maintained by driver */
1730 for (i = 0; i < reserved_num; i++)
1731 rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]);
1733 for (i = 0; i < rm_num; i++) {
1734 num = reserved_num + i;
1735 rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]);
1738 *reserved_addr_num = reserved_num;
1739 *add_addr_num = add_num;
1740 *rm_addr_num = rm_num;
/*
 * .set_mc_addr_list ethdev ops callback: replace the whole multicast list.
 * Validates the input, computes the add/remove delta versus the cached
 * list, then applies removals (in reverse order) and additions under
 * hw->lock, updating hw->mc_addrs as it goes.
 */
1744 hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
1745 struct rte_ether_addr *mc_addr_set,
1746 uint32_t nb_mc_addr)
1748 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1749 struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM];
1750 struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM];
1751 struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM];
1752 struct rte_ether_addr *addr;
1753 int reserved_addr_num;
1761 /* Check if input parameters are valid */
1762 ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
1766 rte_spinlock_lock(&hw->lock);
1769 * Calculate the mc mac address lists those should be removed and be
1770 * added, Reorder the mc mac address list maintained by driver.
1772 mc_addr_num = (int)nb_mc_addr;
1773 hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num,
1774 reserved_addr_list, &reserved_addr_num,
1775 add_addr_list, &add_addr_num,
1776 rm_addr_list, &rm_addr_num);
1778 /* Remove mc mac addresses */
1779 for (i = 0; i < rm_addr_num; i++) {
/* Remove from the tail of the reordered cache list first. */
1780 num = rm_addr_num - i - 1;
1781 addr = &rm_addr_list[num];
1782 ret = hns3_remove_mc_addr(hw, addr);
1784 rte_spinlock_unlock(&hw->lock);
1790 /* Add mc mac addresses */
1791 for (i = 0; i < add_addr_num; i++) {
1792 addr = &add_addr_list[i];
1793 ret = hns3_add_mc_addr(hw, addr);
1795 rte_spinlock_unlock(&hw->lock);
/* Append the newly added address after the reserved block in the cache. */
1799 num = reserved_addr_num + i;
1800 rte_ether_addr_copy(addr, &hw->mc_addrs[num]);
1803 rte_spinlock_unlock(&hw->lock);
/*
 * Remove (del == true) or restore (del == false) every cached multicast
 * address in hw->mc_addrs.  Used around reset/restore flows; per-address
 * failures are logged at debug level and do not stop the loop.
 */
1809 hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
1811 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1812 struct hns3_hw *hw = &hns->hw;
1813 struct rte_ether_addr *addr;
1818 for (i = 0; i < hw->mc_addrs_num; i++) {
1819 addr = &hw->mc_addrs[i];
/* Defensive: skip cache slots that do not hold a multicast address. */
1820 if (!rte_is_multicast_ether_addr(addr))
1823 ret = hns3_remove_mc_addr(hw, addr);
1825 ret = hns3_add_mc_addr(hw, addr);
1828 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1830 hns3_dbg(hw, "%s mc mac addr: %s failed",
1831 del ? "Remove" : "Restore", mac_str);
/*
 * Validate the requested RX/TX multi-queue modes against hardware limits:
 * VMDQ modes are rejected outright; for DCB+RSS the TC count must be 4 or
 * 8, RX and TC configuration must match TX, and the highest TC referenced
 * by the prio->TC map must stay within nb_tcs.
 */
1838 hns3_check_mq_mode(struct rte_eth_dev *dev)
1840 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1841 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1842 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1843 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1844 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1845 struct rte_eth_dcb_tx_conf *dcb_tx_conf;
1850 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1851 dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
/* VMDQ is not supported by this driver in any combination. */
1853 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1854 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
1855 "rx_mq_mode = %d", rx_mq_mode);
1859 if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
1860 tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1861 hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
1862 "is not supported. rx_mq_mode = %d, tx_mq_mode = %d",
1863 rx_mq_mode, tx_mq_mode);
1867 if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
1868 if (dcb_rx_conf->nb_tcs > pf->tc_max) {
1869 hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
1870 dcb_rx_conf->nb_tcs, pf->tc_max);
/* Hardware only supports exactly 4 or 8 TCs in DCB mode. */
1874 if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
1875 dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
1876 hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
1877 "nb_tcs(%d) != %d or %d in rx direction.",
1878 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
1882 if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
1883 hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
1884 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
/* The user-priority -> TC map must be identical for RX and TX. */
1888 for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1889 if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
1890 hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
1891 "is not equal to one in tx direction.",
1892 i, dcb_rx_conf->dcb_tc[i]);
1895 if (dcb_rx_conf->dcb_tc[i] > max_tc)
1896 max_tc = dcb_rx_conf->dcb_tc[i];
/* The number of TCs actually mapped may not exceed nb_tcs. */
1899 num_tc = max_tc + 1;
1900 if (num_tc > dcb_rx_conf->nb_tcs) {
1901 hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
1902 num_tc, dcb_rx_conf->nb_tcs);
1911 hns3_check_dcb_cfg(struct rte_eth_dev *dev)
1913 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1915 if (!hns3_dev_dcb_supported(hw)) {
1916 hns3_err(hw, "this port does not support dcb configurations.");
1920 if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
1921 hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
1925 /* Check multiple queue mode */
1926 return hns3_check_mq_mode(dev);
/*
 * .dev_configure ethdev ops callback.  Enforces hip08's requirement that
 * nb_rx_queues == nb_tx_queues, rejects fixed link speeds, validates DCB,
 * applies the RSS configuration (falling back to the driver's stored key),
 * refreshes the MTU when jumbo frames are enabled, and configures VLAN.
 * Moves adapter_state CONFIGURING -> CONFIGURED (back to INITIALIZED on
 * failure).
 */
1930 hns3_dev_configure(struct rte_eth_dev *dev)
1932 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1933 struct hns3_rss_conf *rss_cfg = &hw->rss_info;
1934 struct rte_eth_conf *conf = &dev->data->dev_conf;
1935 enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
1936 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1937 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1938 struct rte_eth_rss_conf rss_conf;
1943 * Hardware does not support where the number of rx and tx queues is
1944 * not equal in hip08.
1946 if (nb_rx_q != nb_tx_q) {
1948 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
1949 "Hardware does not support this configuration!",
1954 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1955 hns3_err(hw, "setting link speed/duplex not supported");
1959 hw->adapter_state = HNS3_NIC_CONFIGURING;
1960 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1961 ret = hns3_check_dcb_cfg(dev);
1966 /* When RSS is not configured, redirect the packet queue 0 */
1967 if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
1968 rss_conf = conf->rx_adv_conf.rss_conf;
/* No user key supplied: reuse the key already stored in hw->rss_info. */
1969 if (rss_conf.rss_key == NULL) {
1970 rss_conf.rss_key = rss_cfg->key;
1971 rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
1974 ret = hns3_dev_rss_hash_update(dev, &rss_conf);
1980 * If jumbo frames are enabled, MTU needs to be refreshed
1981 * according to the maximum RX packet length.
1983 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1985 * Security of max_rx_pkt_len is guaranteed in dpdk frame.
1986 * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it
1987 * can safely assign to "uint16_t" type variable.
1989 mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
1990 ret = hns3_dev_mtu_set(dev, mtu);
1993 dev->data->mtu = mtu;
1996 ret = hns3_dev_configure_vlan(dev);
2000 hw->adapter_state = HNS3_NIC_CONFIGURED;
/* Error path: fall back to the pre-configure state. */
2005 hw->adapter_state = HNS3_NIC_INITIALIZED;
/*
 * Send the CONFIG_MAX_FRM_SIZE command to firmware: set the MAC's maximum
 * frame size to @new_mps and the minimum to HNS3_MIN_FRAME_LEN.
 */
2010 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
2012 struct hns3_config_max_frm_size_cmd *req;
2013 struct hns3_cmd_desc desc;
2015 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
2017 req = (struct hns3_config_max_frm_size_cmd *)desc.data;
2018 req->max_frm_size = rte_cpu_to_le_16(new_mps);
2019 req->min_frm_size = HNS3_MIN_FRAME_LEN;
2021 return hns3_cmd_send(hw, &desc, 1);
/*
 * Program the new maximum packet size into the MAC and then reallocate
 * the RX/TX packet buffers to suit it.  Caller holds hw->lock.
 */
2025 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
2029 ret = hns3_set_mac_mtu(hw, mps);
2031 hns3_err(hw, "Failed to set mtu, ret = %d", ret);
2035 ret = hns3_buffer_alloc(hw);
2037 hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
2045 hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2047 struct hns3_adapter *hns = dev->data->dev_private;
2048 uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
2049 struct hns3_hw *hw = &hns->hw;
2050 bool is_jumbo_frame;
2053 if (dev->data->dev_started) {
2054 hns3_err(hw, "Failed to set mtu, port %u must be stopped "
2055 "before configuration", dev->data->port_id);
2059 rte_spinlock_lock(&hw->lock);
2060 is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false;
2061 frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
2064 * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely
2065 * assign to "uint16_t" type variable.
2067 ret = hns3_config_mtu(hw, (uint16_t)frame_size);
2069 rte_spinlock_unlock(&hw->lock);
2070 hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d",
2071 dev->data->port_id, mtu, ret);
2074 hns->pf.mps = (uint16_t)frame_size;
2076 dev->data->dev_conf.rxmode.offloads |=
2077 DEV_RX_OFFLOAD_JUMBO_FRAME;
2079 dev->data->dev_conf.rxmode.offloads &=
2080 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2081 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2082 rte_spinlock_unlock(&hw->lock);
/*
 * .dev_infos_get ethdev ops callback: report static device capabilities --
 * queue limits, offload capabilities, descriptor limits, RSS parameters,
 * and the recommended default port configuration.
 */
2088 hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
2090 struct hns3_adapter *hns = eth_dev->data->dev_private;
2091 struct hns3_hw *hw = &hns->hw;
2093 info->max_rx_queues = hw->tqps_num;
2094 info->max_tx_queues = hw->tqps_num;
2095 info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
2096 info->min_rx_bufsize = hw->rx_buf_len;
2097 info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
2098 info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
2099 info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
2100 DEV_RX_OFFLOAD_TCP_CKSUM |
2101 DEV_RX_OFFLOAD_UDP_CKSUM |
2102 DEV_RX_OFFLOAD_SCTP_CKSUM |
2103 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2104 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
2105 DEV_RX_OFFLOAD_KEEP_CRC |
2106 DEV_RX_OFFLOAD_SCATTER |
2107 DEV_RX_OFFLOAD_VLAN_STRIP |
2108 DEV_RX_OFFLOAD_QINQ_STRIP |
2109 DEV_RX_OFFLOAD_VLAN_FILTER |
2110 DEV_RX_OFFLOAD_VLAN_EXTEND |
2111 DEV_RX_OFFLOAD_JUMBO_FRAME);
2112 info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2113 info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2114 DEV_TX_OFFLOAD_IPV4_CKSUM |
2115 DEV_TX_OFFLOAD_TCP_CKSUM |
2116 DEV_TX_OFFLOAD_UDP_CKSUM |
2117 DEV_TX_OFFLOAD_SCTP_CKSUM |
2118 DEV_TX_OFFLOAD_VLAN_INSERT |
2119 DEV_TX_OFFLOAD_QINQ_INSERT |
2120 DEV_TX_OFFLOAD_MULTI_SEGS |
2121 info->tx_queue_offload_capa);
/* Ring size limits are shared by RX and TX. */
2123 info->rx_desc_lim = (struct rte_eth_desc_lim) {
2124 .nb_max = HNS3_MAX_RING_DESC,
2125 .nb_min = HNS3_MIN_RING_DESC,
2126 .nb_align = HNS3_ALIGN_RING_DESC,
2129 info->tx_desc_lim = (struct rte_eth_desc_lim) {
2130 .nb_max = HNS3_MAX_RING_DESC,
2131 .nb_min = HNS3_MIN_RING_DESC,
2132 .nb_align = HNS3_ALIGN_RING_DESC,
2135 info->vmdq_queue_num = 0;
2137 info->reta_size = HNS3_RSS_IND_TBL_SIZE;
2138 info->hash_key_size = HNS3_RSS_KEY_SIZE;
2139 info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
2141 info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2142 info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
2143 info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2144 info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
2145 info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
2146 info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;
/*
 * .fw_version_get ethdev ops callback: format the firmware version as a
 * hex string into @fw_version.  Returns the required buffer size
 * (including the terminating NUL) when @fw_size is too small.
 */
2152 hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
2155 struct hns3_adapter *hns = eth_dev->data->dev_private;
2156 struct hns3_hw *hw = &hns->hw;
2159 ret = snprintf(fw_version, fw_size, "0x%08x", hw->fw_version);
2160 ret += 1; /* add the size of '\0' */
2161 if (fw_size < (uint32_t)ret)
/*
 * .link_update ethdev ops callback: translate the driver's cached MAC
 * state (speed/duplex/status) into a struct rte_eth_link and publish it.
 * Unknown speeds fall back to 100M (the switch default case).
 */
2168 hns3_dev_link_update(struct rte_eth_dev *eth_dev,
2169 __rte_unused int wait_to_complete)
2171 struct hns3_adapter *hns = eth_dev->data->dev_private;
2172 struct hns3_hw *hw = &hns->hw;
2173 struct hns3_mac *mac = &hw->mac;
2174 struct rte_eth_link new_link;
2176 memset(&new_link, 0, sizeof(new_link));
2177 switch (mac->link_speed) {
2178 case ETH_SPEED_NUM_10M:
2179 case ETH_SPEED_NUM_100M:
2180 case ETH_SPEED_NUM_1G:
2181 case ETH_SPEED_NUM_10G:
2182 case ETH_SPEED_NUM_25G:
2183 case ETH_SPEED_NUM_40G:
2184 case ETH_SPEED_NUM_50G:
2185 case ETH_SPEED_NUM_100G:
2186 new_link.link_speed = mac->link_speed;
/* Unrecognized cached speed: report the minimum supported rate. */
2189 new_link.link_speed = ETH_SPEED_NUM_100M;
2193 new_link.link_duplex = mac->link_duplex;
2194 new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
2195 new_link.link_autoneg =
2196 !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
2198 return rte_eth_linkstatus_set(eth_dev, &new_link);
/*
 * Interpret the QUERY_FUNC_STATUS response: fails unless the PF state
 * reports DONE, and records whether this PF is the main PF.
 */
2202 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
2204 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2205 struct hns3_pf *pf = &hns->pf;
2207 if (!(status->pf_state & HNS3_PF_STATE_DONE))
2210 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
/*
 * Poll firmware for the function status, retrying up to HNS3_QUERY_MAX_CNT
 * times with a 1 ms sleep between attempts, until PF reset is reported
 * done; then parse the final response.
 */
2216 hns3_query_function_status(struct hns3_hw *hw)
2218 #define HNS3_QUERY_MAX_CNT 10
2219 #define HNS3_QUERY_SLEEP_MSCOEND 1
2220 struct hns3_func_status_cmd *req;
2221 struct hns3_cmd_desc desc;
2225 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
2226 req = (struct hns3_func_status_cmd *)desc.data;
2229 ret = hns3_cmd_send(hw, &desc, 1);
2231 PMD_INIT_LOG(ERR, "query function status failed %d",
2236 /* Check pf reset is done */
2240 rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);
2241 } while (timeout++ < HNS3_QUERY_MAX_CNT);
2243 return hns3_parse_func_status(hw, req);
/*
 * Query the firmware for this PF's resources (TQP count, packet buffer
 * size, TX/DV buffer sizes, interrupt vector count) and cache them in
 * hw/pf state.  Zero-reported buffer sizes fall back to driver defaults;
 * sizes are converted from HNS3_BUF_UNIT_S units to bytes and rounded up
 * to HNS3_BUF_SIZE_UNIT.
 */
2247 hns3_query_pf_resource(struct hns3_hw *hw)
2249 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2250 struct hns3_pf *pf = &hns->pf;
2251 struct hns3_pf_res_cmd *req;
2252 struct hns3_cmd_desc desc;
2255 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
2256 ret = hns3_cmd_send(hw, &desc, 1);
2258 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
2262 req = (struct hns3_pf_res_cmd *)desc.data;
2263 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
2264 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
/* Cap the usable queue pairs at the per-function hardware maximum. */
2265 hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
2267 if (req->tx_buf_size)
2269 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
2271 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
2273 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
2275 if (req->dv_buf_size)
2277 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
2279 pf->dv_buf_size = HNS3_DEFAULT_DV;
2281 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
2284 hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
2285 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
/*
 * Decode the GET_CFG_PARAM response descriptors into a struct hns3_cfg:
 * VMDQ/TC/queue-descriptor counts, PHY address, media type, RX buffer
 * length, MAC address, default speed, RSS size, NUMA map, speed ability
 * and UMV table space (defaulted when firmware reports 0).
 */
2291 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
2293 struct hns3_cfg_param_cmd *req;
2294 uint64_t mac_addr_tmp_high;
2295 uint64_t mac_addr_tmp;
2298 req = (struct hns3_cfg_param_cmd *)desc[0].data;
2300 /* get the configuration */
2301 cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2302 HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);
2303 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2304 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
2305 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
2306 HNS3_CFG_TQP_DESC_N_M,
2307 HNS3_CFG_TQP_DESC_N_S);
2309 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2310 HNS3_CFG_PHY_ADDR_M,
2311 HNS3_CFG_PHY_ADDR_S);
2312 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2313 HNS3_CFG_MEDIA_TP_M,
2314 HNS3_CFG_MEDIA_TP_S);
2315 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2316 HNS3_CFG_RX_BUF_LEN_M,
2317 HNS3_CFG_RX_BUF_LEN_S);
2318 /* get mac address */
2319 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
2320 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2321 HNS3_CFG_MAC_ADDR_H_M,
2322 HNS3_CFG_MAC_ADDR_H_S);
/* Combine: low 32 bits from param[2], high 16 bits shifted in by 32. */
2324 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
2326 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2327 HNS3_CFG_DEFAULT_SPEED_M,
2328 HNS3_CFG_DEFAULT_SPEED_S);
2329 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
2330 HNS3_CFG_RSS_SIZE_M,
2331 HNS3_CFG_RSS_SIZE_S);
/* Unpack the 48-bit value into the byte array, least significant first. */
2333 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
2334 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
2336 req = (struct hns3_cfg_param_cmd *)desc[1].data;
2337 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
2339 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2340 HNS3_CFG_SPEED_ABILITY_M,
2341 HNS3_CFG_SPEED_ABILITY_S);
2342 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
2343 HNS3_CFG_UMV_TBL_SPACE_M,
2344 HNS3_CFG_UMV_TBL_SPACE_S);
2345 if (!cfg->umv_space)
2346 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
2349 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash
2350 * @hw: pointer to struct hns3_hw
2351 * @hcfg: the config structure to be filled in
2354 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
2356 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
2357 struct hns3_cfg_param_cmd *req;
/* Build one GET_CFG_PARAM descriptor per config chunk, each with its
 * byte offset and read length encoded in the offset field. */
2362 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
2364 req = (struct hns3_cfg_param_cmd *)desc[i].data;
2365 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
2367 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
2368 i * HNS3_CFG_RD_LEN_BYTES);
2369 /* Len should be divided by 4 when send to hardware */
2370 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
2371 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
2372 req->offset = rte_cpu_to_le_32(offset);
/* Send all descriptors as one multi-descriptor command, then decode. */
2375 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
2377 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
2381 hns3_parse_cfg(hcfg, desc);
/*
 * Map a firmware speed code (HNS3_CFG_SPEED_*) to the corresponding
 * ETH_SPEED_NUM_* value, written through @speed.
 */
2387 hns3_parse_speed(int speed_cmd, uint32_t *speed)
2389 switch (speed_cmd) {
2390 case HNS3_CFG_SPEED_10M:
2391 *speed = ETH_SPEED_NUM_10M;
2393 case HNS3_CFG_SPEED_100M:
2394 *speed = ETH_SPEED_NUM_100M;
2396 case HNS3_CFG_SPEED_1G:
2397 *speed = ETH_SPEED_NUM_1G;
2399 case HNS3_CFG_SPEED_10G:
2400 *speed = ETH_SPEED_NUM_10G;
2402 case HNS3_CFG_SPEED_25G:
2403 *speed = ETH_SPEED_NUM_25G;
2405 case HNS3_CFG_SPEED_40G:
2406 *speed = ETH_SPEED_NUM_40G;
2408 case HNS3_CFG_SPEED_50G:
2409 *speed = ETH_SPEED_NUM_50G;
2411 case HNS3_CFG_SPEED_100G:
2412 *speed = ETH_SPEED_NUM_100G;
/*
 * Read the board configuration from flash and initialize hw/pf state:
 * media type (copper is rejected), RSS sizing, RX buffer length, MAC
 * address, descriptor counts, link speed, and TC/DCB defaults.  The TC
 * count from flash is sanity-checked; non-DCB devices run with one TC.
 */
2422 hns3_get_board_configuration(struct hns3_hw *hw)
2424 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2425 struct hns3_pf *pf = &hns->pf;
2426 struct hns3_cfg cfg;
2429 ret = hns3_get_board_cfg(hw, &cfg);
2431 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
2435 if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) {
2436 PMD_INIT_LOG(ERR, "media type is copper, not supported.");
2440 hw->mac.media_type = cfg.media_type;
2441 hw->rss_size_max = cfg.rss_size_max;
2442 hw->rx_buf_len = cfg.rx_buf_len;
2443 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
2444 hw->mac.phy_addr = cfg.phy_addr;
2445 hw->mac.default_addr_setted = false;
2446 hw->num_tx_desc = cfg.tqp_desc_num;
2447 hw->num_rx_desc = cfg.tqp_desc_num;
2448 hw->dcb_info.num_pg = 1;
2449 hw->dcb_info.hw_pfc_map = 0;
2451 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
2453 PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
2454 cfg.default_speed, ret);
/* Out-of-range TC count from flash: warn and fall back to a single TC. */
2458 pf->tc_max = cfg.tc_num;
2459 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
2460 PMD_INIT_LOG(WARNING,
2461 "Get TC num(%u) from flash, set TC num to 1",
2466 /* Dev does not support DCB */
2467 if (!hns3_dev_dcb_supported(hw)) {
2471 pf->pfc_max = pf->tc_max;
/* Start with one active TC; spread the RSS queues across it. */
2473 hw->dcb_info.num_tc = 1;
2474 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
2475 hw->tqps_num / hw->dcb_info.num_tc);
2476 hns3_set_bit(hw->hw_tc_map, 0, 1);
2477 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
2479 pf->wanted_umv_size = cfg.umv_space;
/*
 * Initialization helper: query function status, PF resources, and the
 * board configuration in order, propagating the first failure.
 */
2485 hns3_get_configuration(struct hns3_hw *hw)
2489 ret = hns3_query_function_status(hw);
2491 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
2495 /* Get pf resource */
2496 ret = hns3_query_pf_resource(hw);
2498 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
2502 ret = hns3_get_board_configuration(hw);
2504 PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
/*
 * Send a SET_TQP_MAP command binding physical queue pair @tqp_pid to
 * function @func_id as virtual queue @tqp_vid; @is_pf selects the
 * PF/VF map-type flag.
 */
2512 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
2513 uint16_t tqp_vid, bool is_pf)
2515 struct hns3_tqp_map_cmd *req;
2516 struct hns3_cmd_desc desc;
2519 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
2521 req = (struct hns3_tqp_map_cmd *)desc.data;
2522 req->tqp_id = rte_cpu_to_le_16(tqp_pid);
2523 req->tqp_vf = func_id;
2524 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
2526 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
2527 req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
2529 ret = hns3_cmd_send(hw, &desc, 1);
2531 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
/*
 * Map all physical TQPs to functions.  Since VFs are not supported when
 * the PF is driven by DPDK, queues are assigned to functions in blocks of
 * HNS3_MAX_TQP_NUM_PER_FUNC, giving the PF as many queues as possible.
 */
2537 hns3_map_tqp(struct hns3_hw *hw)
2539 uint16_t tqps_num = hw->total_tqps_num;
2547 * In current version VF is not supported when PF is driven by DPDK
2548 * driver, so we allocate tqps to PF as much as possible.
2551 num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
2552 for (func_id = 0; func_id < num; func_id++) {
2554 i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
2555 ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
/*
 * Program the MAC speed and duplex via the CONFIG_SPEED_DUP command.
 * Translates the ETH_SPEED_NUM_* value into the firmware speed code;
 * unsupported speeds are rejected.  Also enables the "MAC change FEC"
 * bit alongside the speed change.
 */
2566 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
2568 struct hns3_config_mac_speed_dup_cmd *req;
2569 struct hns3_cmd_desc desc;
2572 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
2574 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
2576 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
2579 case ETH_SPEED_NUM_10M:
2580 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2581 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
2583 case ETH_SPEED_NUM_100M:
2584 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2585 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
2587 case ETH_SPEED_NUM_1G:
2588 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2589 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
2591 case ETH_SPEED_NUM_10G:
2592 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2593 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
2595 case ETH_SPEED_NUM_25G:
2596 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2597 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
2599 case ETH_SPEED_NUM_40G:
2600 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2601 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
2603 case ETH_SPEED_NUM_50G:
2604 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2605 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
2607 case ETH_SPEED_NUM_100G:
2608 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
2609 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
2612 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
2616 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
2618 ret = hns3_cmd_send(hw, &desc, 1);
2620 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
/*
 * Distribute the PF packet buffer for TX: every TC enabled in hw_tc_map
 * gets pf->tx_buf_size; disabled TCs get 0.  Fails (via the not-shown
 * error path) when the remaining budget cannot cover an enabled TC.
 */
2626 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2628 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2629 struct hns3_pf *pf = &hns->pf;
2630 struct hns3_priv_buf *priv;
2631 uint32_t i, total_size;
2633 total_size = pf->pkt_buf_size;
2635 /* alloc tx buffer for all enabled tc */
2636 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2637 priv = &buf_alloc->priv_buf[i];
2639 if (hw->hw_tc_map & BIT(i)) {
2640 if (total_size < pf->tx_buf_size)
2643 priv->tx_buf_size = pf->tx_buf_size;
2645 priv->tx_buf_size = 0;
2647 total_size -= priv->tx_buf_size;
2654 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2656 /* TX buffer size is unit by 128 byte */
2657 #define HNS3_BUF_SIZE_UNIT_SHIFT 7
2658 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15)
2659 struct hns3_tx_buff_alloc_cmd *req;
2660 struct hns3_cmd_desc desc;
2665 req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
2667 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
2668 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2669 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
2671 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
2672 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
2673 HNS3_BUF_SIZE_UPDATE_EN_MSK);
2676 ret = hns3_cmd_send(hw, &desc, 1);
2678 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
2684 hns3_get_tc_num(struct hns3_hw *hw)
2689 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
2690 if (hw->hw_tc_map & BIT(i))
2696 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
2698 struct hns3_priv_buf *priv;
2699 uint32_t rx_priv = 0;
2702 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2703 priv = &buf_alloc->priv_buf[i];
2705 rx_priv += priv->buf_size;
2711 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
2713 uint32_t total_tx_size = 0;
2716 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
2717 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2719 return total_tx_size;
2722 /* Get the number of pfc enabled TCs, which have private buffer */
2724 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
2726 struct hns3_priv_buf *priv;
2730 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2731 priv = &buf_alloc->priv_buf[i];
2732 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
2739 /* Get the number of pfc disabled TCs, which have private buffer */
2741 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
2742 struct hns3_pkt_buf_alloc *buf_alloc)
2744 struct hns3_priv_buf *priv;
2748 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2749 priv = &buf_alloc->priv_buf[i];
2750 if (hw->hw_tc_map & BIT(i) &&
2751 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
2759 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
2762 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2763 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2764 struct hns3_pf *pf = &hns->pf;
2765 uint32_t shared_buf, aligned_mps;
2770 tc_num = hns3_get_tc_num(hw);
2771 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
2773 if (hns3_dev_dcb_supported(hw))
2774 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
2777 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
2780 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2781 shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc),
2782 HNS3_BUF_SIZE_UNIT);
2784 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
2785 if (rx_all < rx_priv + shared_std)
2788 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
2789 buf_alloc->s_buf.buf_size = shared_buf;
2790 if (hns3_dev_dcb_supported(hw)) {
2791 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
2792 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2793 - roundup(aligned_mps / HNS3_BUF_DIV_BY,
2794 HNS3_BUF_SIZE_UNIT);
2796 buf_alloc->s_buf.self.high =
2797 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
2798 buf_alloc->s_buf.self.low = aligned_mps;
2801 if (hns3_dev_dcb_supported(hw)) {
2802 hi_thrd = shared_buf - pf->dv_buf_size;
2804 if (tc_num <= NEED_RESERVE_TC_NUM)
2805 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2809 hi_thrd = hi_thrd / tc_num;
2811 hi_thrd = max_t(uint32_t, hi_thrd,
2812 HNS3_BUF_MUL_BY * aligned_mps);
2813 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
2814 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
2816 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
2817 lo_thrd = aligned_mps;
2820 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2821 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2822 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2829 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
2830 struct hns3_pkt_buf_alloc *buf_alloc)
2832 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2833 struct hns3_pf *pf = &hns->pf;
2834 struct hns3_priv_buf *priv;
2835 uint32_t aligned_mps;
2839 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2840 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
2842 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2843 priv = &buf_alloc->priv_buf[i];
2850 if (!(hw->hw_tc_map & BIT(i)))
2854 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
2855 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
2856 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2857 HNS3_BUF_SIZE_UNIT);
2860 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
2864 priv->buf_size = priv->wl.high + pf->dv_buf_size;
2867 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
2871 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
2872 struct hns3_pkt_buf_alloc *buf_alloc)
2874 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2875 struct hns3_pf *pf = &hns->pf;
2876 struct hns3_priv_buf *priv;
2877 int no_pfc_priv_num;
2882 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2883 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
2885 /* let the last to be cleared first */
2886 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
2887 priv = &buf_alloc->priv_buf[i];
2888 mask = BIT((uint8_t)i);
2890 if (hw->hw_tc_map & mask &&
2891 !(hw->dcb_info.hw_pfc_map & mask)) {
2892 /* Clear the no pfc TC private buffer */
2900 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
2901 no_pfc_priv_num == 0)
2905 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
2909 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
2910 struct hns3_pkt_buf_alloc *buf_alloc)
2912 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2913 struct hns3_pf *pf = &hns->pf;
2914 struct hns3_priv_buf *priv;
2920 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2921 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
2923 /* let the last to be cleared first */
2924 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
2925 priv = &buf_alloc->priv_buf[i];
2926 mask = BIT((uint8_t)i);
2928 if (hw->hw_tc_map & mask &&
2929 hw->dcb_info.hw_pfc_map & mask) {
2930 /* Reduce the number of pfc TC with private buffer */
2937 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
2942 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
2946 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
2947 struct hns3_pkt_buf_alloc *buf_alloc)
2949 #define COMPENSATE_BUFFER 0x3C00
2950 #define COMPENSATE_HALF_MPS_NUM 5
2951 #define PRIV_WL_GAP 0x1800
2952 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
2953 struct hns3_pf *pf = &hns->pf;
2954 uint32_t tc_num = hns3_get_tc_num(hw);
2955 uint32_t half_mps = pf->mps >> 1;
2956 struct hns3_priv_buf *priv;
2957 uint32_t min_rx_priv;
2961 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
2963 rx_priv = rx_priv / tc_num;
2965 if (tc_num <= NEED_RESERVE_TC_NUM)
2966 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2969 * Minimum value of private buffer in rx direction (min_rx_priv) is
2970 * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private
2971 * buffer if rx_priv is greater than min_rx_priv.
2973 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
2974 COMPENSATE_HALF_MPS_NUM * half_mps;
2975 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
2976 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
2978 if (rx_priv < min_rx_priv)
2981 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
2982 priv = &buf_alloc->priv_buf[i];
2989 if (!(hw->hw_tc_map & BIT(i)))
2993 priv->buf_size = rx_priv;
2994 priv->wl.high = rx_priv - pf->dv_buf_size;
2995 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2998 buf_alloc->s_buf.buf_size = 0;
3004 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
3005 * @hw: pointer to struct hns3_hw
3006 * @buf_alloc: pointer to buffer calculation data
3007 * @return: 0: calculate sucessful, negative: fail
3010 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3012 /* When DCB is not supported, rx private buffer is not allocated. */
3013 if (!hns3_dev_dcb_supported(hw)) {
3014 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3015 struct hns3_pf *pf = &hns->pf;
3016 uint32_t rx_all = pf->pkt_buf_size;
3018 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
3019 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
3026 * Try to allocate privated packet buffer for all TCs without share
3029 if (hns3_only_alloc_priv_buff(hw, buf_alloc))
3033 * Try to allocate privated packet buffer for all TCs with share
3036 if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
3040 * For different application scenes, the enabled port number, TC number
3041 * and no_drop TC number are different. In order to obtain the better
3042 * performance, software could allocate the buffer size and configure
3043 * the waterline by tring to decrease the private buffer size according
3044 * to the order, namely, waterline of valided tc, pfc disabled tc, pfc
3047 if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
3050 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
3053 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
3060 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3062 struct hns3_rx_priv_buff_cmd *req;
3063 struct hns3_cmd_desc desc;
3068 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
3069 req = (struct hns3_rx_priv_buff_cmd *)desc.data;
3071 /* Alloc private buffer TCs */
3072 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
3073 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
3076 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
3077 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
3080 buf_size = buf_alloc->s_buf.buf_size;
3081 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
3082 (1 << HNS3_TC0_PRI_BUF_EN_B));
3084 ret = hns3_cmd_send(hw, &desc, 1);
3086 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
3092 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3094 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
3095 struct hns3_rx_priv_wl_buf *req;
3096 struct hns3_priv_buf *priv;
3097 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
3101 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
3102 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
3104 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
3106 /* The first descriptor set the NEXT bit to 1 */
3108 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3110 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3112 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3113 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
3115 priv = &buf_alloc->priv_buf[idx];
3116 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
3118 req->tc_wl[j].high |=
3119 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3120 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
3122 req->tc_wl[j].low |=
3123 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3127 /* Send 2 descriptor at one time */
3128 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
3130 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
3136 hns3_common_thrd_config(struct hns3_hw *hw,
3137 struct hns3_pkt_buf_alloc *buf_alloc)
3139 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
3140 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
3141 struct hns3_rx_com_thrd *req;
3142 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
3143 struct hns3_tc_thrd *tc;
3148 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
3149 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
3151 req = (struct hns3_rx_com_thrd *)&desc[i].data;
3153 /* The first descriptor set the NEXT bit to 1 */
3155 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3157 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
3159 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
3160 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
3161 tc = &s_buf->tc_thrd[tc_idx];
3163 req->com_thrd[j].high =
3164 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
3165 req->com_thrd[j].high |=
3166 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3167 req->com_thrd[j].low =
3168 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
3169 req->com_thrd[j].low |=
3170 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3174 /* Send 2 descriptors at one time */
3175 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
3177 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
3183 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
3185 struct hns3_shared_buf *buf = &buf_alloc->s_buf;
3186 struct hns3_rx_com_wl *req;
3187 struct hns3_cmd_desc desc;
3190 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
3192 req = (struct hns3_rx_com_wl *)desc.data;
3193 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
3194 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3196 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
3197 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
3199 ret = hns3_cmd_send(hw, &desc, 1);
3201 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
3207 hns3_buffer_alloc(struct hns3_hw *hw)
3209 struct hns3_pkt_buf_alloc pkt_buf;
3212 memset(&pkt_buf, 0, sizeof(pkt_buf));
3213 ret = hns3_tx_buffer_calc(hw, &pkt_buf);
3216 "could not calc tx buffer size for all TCs %d",
3221 ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
3223 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
3227 ret = hns3_rx_buffer_calc(hw, &pkt_buf);
3230 "could not calc rx priv buffer size for all TCs %d",
3235 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
3237 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
3241 if (hns3_dev_dcb_supported(hw)) {
3242 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
3245 "could not configure rx private waterline %d",
3250 ret = hns3_common_thrd_config(hw, &pkt_buf);
3253 "could not configure common threshold %d",
3259 ret = hns3_common_wl_config(hw, &pkt_buf);
3261 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
3268 hns3_mac_init(struct hns3_hw *hw)
3270 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
3271 struct hns3_mac *mac = &hw->mac;
3272 struct hns3_pf *pf = &hns->pf;
3275 pf->support_sfp_query = true;
3276 mac->link_duplex = ETH_LINK_FULL_DUPLEX;
3277 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
3279 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
3283 mac->link_status = ETH_LINK_DOWN;
3285 return hns3_config_mtu(hw, pf->mps);
3289 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
3291 #define HNS3_ETHERTYPE_SUCCESS_ADD 0
3292 #define HNS3_ETHERTYPE_ALREADY_ADD 1
3293 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2
3294 #define HNS3_ETHERTYPE_KEY_CONFLICT 3
3299 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
3304 switch (resp_code) {
3305 case HNS3_ETHERTYPE_SUCCESS_ADD:
3306 case HNS3_ETHERTYPE_ALREADY_ADD:
3309 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
3311 "add mac ethertype failed for manager table overflow.");
3312 return_status = -EIO;
3314 case HNS3_ETHERTYPE_KEY_CONFLICT:
3315 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
3316 return_status = -EIO;
3320 "add mac ethertype failed for undefined, code=%d.",
3322 return_status = -EIO;
3325 return return_status;
3329 hns3_add_mgr_tbl(struct hns3_hw *hw,
3330 const struct hns3_mac_mgr_tbl_entry_cmd *req)
3332 struct hns3_cmd_desc desc;
3337 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
3338 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
3340 ret = hns3_cmd_send(hw, &desc, 1);
3343 "add mac ethertype failed for cmd_send, ret =%d.",
3348 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
3349 retval = rte_le_to_cpu_16(desc.retval);
3351 return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
3355 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
3356 int *table_item_num)
3358 struct hns3_mac_mgr_tbl_entry_cmd *tbl;
3361 * In current version, we add one item in management table as below:
3362 * 0x0180C200000E -- LLDP MC address
3365 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
3366 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
3367 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
3368 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
3369 tbl->i_port_bitmap = 0x1;
3370 *table_item_num = 1;
3374 hns3_init_mgr_tbl(struct hns3_hw *hw)
3376 #define HNS_MAC_MGR_TBL_MAX_SIZE 16
3377 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
3382 memset(mgr_table, 0, sizeof(mgr_table));
3383 hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
3384 for (i = 0; i < table_item_num; i++) {
3385 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
3387 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
3397 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
3398 bool en_mc, bool en_bc, int vport_id)
3403 memset(param, 0, sizeof(struct hns3_promisc_param));
3405 param->enable = HNS3_PROMISC_EN_UC;
3407 param->enable |= HNS3_PROMISC_EN_MC;
3409 param->enable |= HNS3_PROMISC_EN_BC;
3410 param->vf_id = vport_id;
3414 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
3416 struct hns3_promisc_cfg_cmd *req;
3417 struct hns3_cmd_desc desc;
3420 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
3422 req = (struct hns3_promisc_cfg_cmd *)desc.data;
3423 req->vf_id = param->vf_id;
3424 req->flag = (param->enable << HNS3_PROMISC_EN_B) |
3425 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
3427 ret = hns3_cmd_send(hw, &desc, 1);
3429 PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
3435 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
3437 struct hns3_promisc_param param;
3438 bool en_bc_pmc = true;
3443 * In current version VF is not supported when PF is driven by DPDK
3444 * driver, the PF-related vf_id is 0, just need to configure parameters
3449 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
3450 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
3458 hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
3460 struct hns3_adapter *hns = dev->data->dev_private;
3461 struct hns3_hw *hw = &hns->hw;
3462 bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
3465 rte_spinlock_lock(&hw->lock);
3466 ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
3467 rte_spinlock_unlock(&hw->lock);
3469 hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
3475 hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
3477 struct hns3_adapter *hns = dev->data->dev_private;
3478 struct hns3_hw *hw = &hns->hw;
3479 bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
3482 /* If now in all_multicast mode, must remain in all_multicast mode. */
3483 rte_spinlock_lock(&hw->lock);
3484 ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
3485 rte_spinlock_unlock(&hw->lock);
3487 hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
3493 hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
3495 struct hns3_adapter *hns = dev->data->dev_private;
3496 struct hns3_hw *hw = &hns->hw;
3497 bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
3500 rte_spinlock_lock(&hw->lock);
3501 ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
3502 rte_spinlock_unlock(&hw->lock);
3504 hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
3510 hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
3512 struct hns3_adapter *hns = dev->data->dev_private;
3513 struct hns3_hw *hw = &hns->hw;
3514 bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
3517 /* If now in promiscuous mode, must remain in all_multicast mode. */
3518 if (dev->data->promiscuous == 1)
3521 rte_spinlock_lock(&hw->lock);
3522 ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
3523 rte_spinlock_unlock(&hw->lock);
3525 hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
3531 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
3533 struct hns3_sfp_speed_cmd *resp;
3534 struct hns3_cmd_desc desc;
3537 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true);
3538 resp = (struct hns3_sfp_speed_cmd *)desc.data;
3539 ret = hns3_cmd_send(hw, &desc, 1);
3540 if (ret == -EOPNOTSUPP) {
3541 hns3_err(hw, "IMP do not support get SFP speed %d", ret);
3544 hns3_err(hw, "get sfp speed failed %d", ret);
3548 *speed = resp->sfp_speed;
3554 hns3_check_speed_dup(uint8_t duplex, uint32_t speed)
3556 if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M))
3557 duplex = ETH_LINK_FULL_DUPLEX;
3563 hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
3565 struct hns3_mac *mac = &hw->mac;
3568 duplex = hns3_check_speed_dup(duplex, speed);
3569 if (mac->link_speed == speed && mac->link_duplex == duplex)
3572 ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex);
3576 mac->link_speed = speed;
3577 mac->link_duplex = duplex;
3583 hns3_update_speed_duplex(struct rte_eth_dev *eth_dev)
3585 struct hns3_adapter *hns = eth_dev->data->dev_private;
3586 struct hns3_hw *hw = &hns->hw;
3587 struct hns3_pf *pf = &hns->pf;
3591 /* If IMP do not support get SFP/qSFP speed, return directly */
3592 if (!pf->support_sfp_query)
3595 ret = hns3_get_sfp_speed(hw, &speed);
3596 if (ret == -EOPNOTSUPP) {
3597 pf->support_sfp_query = false;
3602 if (speed == ETH_SPEED_NUM_NONE)
3603 return 0; /* do nothing if no SFP */
3605 /* Config full duplex for SFP */
3606 return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX);
3610 hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable)
3612 struct hns3_config_mac_mode_cmd *req;
3613 struct hns3_cmd_desc desc;
3614 uint32_t loop_en = 0;
3618 req = (struct hns3_config_mac_mode_cmd *)desc.data;
3620 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false);
3623 hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val);
3624 hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val);
3625 hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val);
3626 hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val);
3627 hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0);
3628 hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0);
3629 hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0);
3630 hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0);
3631 hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val);
3632 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val);
3633 hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val);
3634 hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val);
3635 hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val);
3636 hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val);
3637 req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en);
3639 ret = hns3_cmd_send(hw, &desc, 1);
3641 PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret);
3647 hns3_get_mac_link_status(struct hns3_hw *hw)
3649 struct hns3_link_status_cmd *req;
3650 struct hns3_cmd_desc desc;
3654 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true);
3655 ret = hns3_cmd_send(hw, &desc, 1);
3657 hns3_err(hw, "get link status cmd failed %d", ret);
3661 req = (struct hns3_link_status_cmd *)desc.data;
3662 link_status = req->status & HNS3_LINK_STATUS_UP_M;
3664 return !!link_status;
3668 hns3_update_link_status(struct hns3_hw *hw)
3672 state = hns3_get_mac_link_status(hw);
3673 if (state != hw->mac.link_status)
3674 hw->mac.link_status = state;
3678 hns3_service_handler(void *param)
3680 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
3681 struct hns3_adapter *hns = eth_dev->data->dev_private;
3682 struct hns3_hw *hw = &hns->hw;
3684 hns3_update_speed_duplex(eth_dev);
3685 hns3_update_link_status(hw);
3687 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
3691 hns3_init_hardware(struct hns3_adapter *hns)
3693 struct hns3_hw *hw = &hns->hw;
3696 ret = hns3_map_tqp(hw);
3698 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
3702 ret = hns3_init_umv_space(hw);
3704 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
3708 ret = hns3_mac_init(hw);
3710 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
3714 ret = hns3_init_mgr_tbl(hw);
3716 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
3720 ret = hns3_set_promisc_mode(hw, false, false);
3722 PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret);
3726 ret = hns3_init_vlan_config(hns);
3728 PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret);
3732 ret = hns3_dcb_init(hw);
3734 PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret);
3738 ret = hns3_init_fd_config(hns);
3740 PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
3744 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
3746 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
3750 ret = hns3_config_gro(hw, false);
3752 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
3758 hns3_uninit_umv_space(hw);
3763 hns3_init_pf(struct rte_eth_dev *eth_dev)
3765 struct rte_device *dev = eth_dev->device;
3766 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
3767 struct hns3_adapter *hns = eth_dev->data->dev_private;
3768 struct hns3_hw *hw = &hns->hw;
3771 PMD_INIT_FUNC_TRACE();
3773 /* Get hardware io base address from pcie BAR2 IO space */
3774 hw->io_base = pci_dev->mem_resource[2].addr;
3776 /* Firmware command queue initialize */
3777 ret = hns3_cmd_init_queue(hw);
3779 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
3780 goto err_cmd_init_queue;
3783 hns3_clear_all_event_cause(hw);
3785 /* Firmware command initialize */
3786 ret = hns3_cmd_init(hw);
3788 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
3792 ret = rte_intr_callback_register(&pci_dev->intr_handle,
3793 hns3_interrupt_handler,
3796 PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
3797 goto err_intr_callback_register;
3800 /* Enable interrupt */
3801 rte_intr_enable(&pci_dev->intr_handle);
3802 hns3_pf_enable_irq0(hw);
3804 /* Get configuration */
3805 ret = hns3_get_configuration(hw);
3807 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
3808 goto err_get_config;
3811 ret = hns3_init_hardware(hns);
3813 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
3814 goto err_get_config;
3817 /* Initialize flow director filter list & hash */
3818 ret = hns3_fdir_filter_init(hns);
3820 PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
3824 hns3_set_default_rss_args(hw);
3826 ret = hns3_enable_hw_error_intr(hns, true);
3828 PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
3836 hns3_fdir_filter_uninit(hns);
3838 hns3_uninit_umv_space(hw);
3841 hns3_pf_disable_irq0(hw);
3842 rte_intr_disable(&pci_dev->intr_handle);
3843 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
3846 err_intr_callback_register:
3847 hns3_cmd_uninit(hw);
3850 hns3_cmd_destroy_queue(hw);
3859 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
3861 struct hns3_adapter *hns = eth_dev->data->dev_private;
3862 struct rte_device *dev = eth_dev->device;
3863 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
3864 struct hns3_hw *hw = &hns->hw;
3866 PMD_INIT_FUNC_TRACE();
3868 hns3_enable_hw_error_intr(hns, false);
3869 hns3_rss_uninit(hns);
3870 hns3_fdir_filter_uninit(hns);
3871 hns3_uninit_umv_space(hw);
3872 hns3_pf_disable_irq0(hw);
3873 rte_intr_disable(&pci_dev->intr_handle);
3874 hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
3876 hns3_cmd_uninit(hw);
3877 hns3_cmd_destroy_queue(hw);
3882 hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
3884 struct hns3_hw *hw = &hns->hw;
3887 ret = hns3_dcb_cfg_update(hns);
3892 ret = hns3_start_queues(hns, reset_queue);
3894 PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
3899 ret = hns3_cfg_mac_mode(hw, true);
3901 PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
3902 goto err_config_mac_mode;
3906 err_config_mac_mode:
3907 hns3_stop_queues(hns, true);
3912 hns3_dev_start(struct rte_eth_dev *eth_dev)
3914 struct hns3_adapter *hns = eth_dev->data->dev_private;
3915 struct hns3_hw *hw = &hns->hw;
3918 PMD_INIT_FUNC_TRACE();
3920 rte_spinlock_lock(&hw->lock);
3921 hw->adapter_state = HNS3_NIC_STARTING;
3923 ret = hns3_do_start(hns, true);
3925 hw->adapter_state = HNS3_NIC_CONFIGURED;
3926 rte_spinlock_unlock(&hw->lock);
3930 hw->adapter_state = HNS3_NIC_STARTED;
3931 rte_spinlock_unlock(&hw->lock);
3932 hns3_set_rxtx_function(eth_dev);
3934 hns3_info(hw, "hns3 dev start successful!");
3939 hns3_do_stop(struct hns3_adapter *hns)
3941 struct hns3_hw *hw = &hns->hw;
3945 ret = hns3_cfg_mac_mode(hw, false);
3948 hw->mac.link_status = ETH_LINK_DOWN;
3950 hns3_configure_all_mac_addr(hns, true);
3952 hw->mac.default_addr_setted = false;
3953 return hns3_stop_queues(hns, reset_queue);
3957 hns3_dev_stop(struct rte_eth_dev *eth_dev)
3959 struct hns3_adapter *hns = eth_dev->data->dev_private;
3960 struct hns3_hw *hw = &hns->hw;
3962 PMD_INIT_FUNC_TRACE();
3964 hw->adapter_state = HNS3_NIC_STOPPING;
3965 hns3_set_rxtx_function(eth_dev);
3967 rte_spinlock_lock(&hw->lock);
3970 hns3_dev_release_mbufs(hns);
3971 hw->adapter_state = HNS3_NIC_CONFIGURED;
3972 rte_spinlock_unlock(&hw->lock);
3976 hns3_dev_close(struct rte_eth_dev *eth_dev)
3978 struct hns3_adapter *hns = eth_dev->data->dev_private;
3979 struct hns3_hw *hw = &hns->hw;
3981 if (hw->adapter_state == HNS3_NIC_STARTED)
3982 hns3_dev_stop(eth_dev);
3984 hw->adapter_state = HNS3_NIC_CLOSING;
3985 rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
3987 hns3_configure_all_mc_mac_addr(hns, true);
3988 hns3_remove_all_vlan_table(hns);
3989 hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
3990 hns3_uninit_pf(eth_dev);
3991 hns3_free_all_queues(eth_dev);
3992 rte_free(eth_dev->process_private);
3993 eth_dev->process_private = NULL;
3994 hw->adapter_state = HNS3_NIC_CLOSED;
3995 hns3_warn(hw, "Close port %d finished", hw->data->port_id);
3999 hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4001 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4002 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4004 fc_conf->pause_time = pf->pause_time;
4006 /* return fc current mode */
4007 switch (hw->current_mode) {
4009 fc_conf->mode = RTE_FC_FULL;
4011 case HNS3_FC_TX_PAUSE:
4012 fc_conf->mode = RTE_FC_TX_PAUSE;
4014 case HNS3_FC_RX_PAUSE:
4015 fc_conf->mode = RTE_FC_RX_PAUSE;
4019 fc_conf->mode = RTE_FC_NONE;
4027 hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
4031 hw->requested_mode = HNS3_FC_NONE;
4033 case RTE_FC_RX_PAUSE:
4034 hw->requested_mode = HNS3_FC_RX_PAUSE;
4036 case RTE_FC_TX_PAUSE:
4037 hw->requested_mode = HNS3_FC_TX_PAUSE;
4040 hw->requested_mode = HNS3_FC_FULL;
4043 hw->requested_mode = HNS3_FC_NONE;
4044 hns3_warn(hw, "fc_mode(%u) exceeds member scope and is "
4045 "configured to RTE_FC_NONE", mode);
4051 hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4053 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4054 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4057 if (fc_conf->high_water || fc_conf->low_water ||
4058 fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) {
4059 hns3_err(hw, "Unsupported flow control settings specified, "
4060 "high_water(%u), low_water(%u), send_xon(%u) and "
4061 "mac_ctrl_frame_fwd(%u) must be set to '0'",
4062 fc_conf->high_water, fc_conf->low_water,
4063 fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd);
4066 if (fc_conf->autoneg) {
4067 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
4070 if (!fc_conf->pause_time) {
4071 hns3_err(hw, "Invalid pause time %d setting.",
4072 fc_conf->pause_time);
4076 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
4077 hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) {
4078 hns3_err(hw, "PFC is enabled. Cannot set MAC pause. "
4079 "current_fc_status = %d", hw->current_fc_status);
4083 hns3_get_fc_mode(hw, fc_conf->mode);
4084 if (hw->requested_mode == hw->current_mode &&
4085 pf->pause_time == fc_conf->pause_time)
4088 rte_spinlock_lock(&hw->lock);
4089 ret = hns3_fc_enable(dev, fc_conf);
4090 rte_spinlock_unlock(&hw->lock);
4096 hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
4097 struct rte_eth_pfc_conf *pfc_conf)
4099 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4100 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4104 if (!hns3_dev_dcb_supported(hw)) {
4105 hns3_err(hw, "This port does not support dcb configurations.");
4109 if (pfc_conf->fc.high_water || pfc_conf->fc.low_water ||
4110 pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) {
4111 hns3_err(hw, "Unsupported flow control settings specified, "
4112 "high_water(%u), low_water(%u), send_xon(%u) and "
4113 "mac_ctrl_frame_fwd(%u) must be set to '0'",
4114 pfc_conf->fc.high_water, pfc_conf->fc.low_water,
4115 pfc_conf->fc.send_xon,
4116 pfc_conf->fc.mac_ctrl_frame_fwd);
4119 if (pfc_conf->fc.autoneg) {
4120 hns3_err(hw, "Unsupported fc auto-negotiation setting.");
4123 if (pfc_conf->fc.pause_time == 0) {
4124 hns3_err(hw, "Invalid pause time %d setting.",
4125 pfc_conf->fc.pause_time);
4129 if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE ||
4130 hw->current_fc_status == HNS3_FC_STATUS_PFC)) {
4131 hns3_err(hw, "MAC pause is enabled. Cannot set PFC."
4132 "current_fc_status = %d", hw->current_fc_status);
4136 priority = pfc_conf->priority;
4137 hns3_get_fc_mode(hw, pfc_conf->fc.mode);
4138 if (hw->dcb_info.pfc_en & BIT(priority) &&
4139 hw->requested_mode == hw->current_mode &&
4140 pfc_conf->fc.pause_time == pf->pause_time)
4143 rte_spinlock_lock(&hw->lock);
4144 ret = hns3_dcb_pfc_enable(dev, pfc_conf);
4145 rte_spinlock_unlock(&hw->lock);
4151 hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
4153 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4154 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4155 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4158 rte_spinlock_lock(&hw->lock);
4159 if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG)
4160 dcb_info->nb_tcs = pf->local_max_tc;
4162 dcb_info->nb_tcs = 1;
4164 for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
4165 dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i];
4166 for (i = 0; i < dcb_info->nb_tcs; i++)
4167 dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];
4169 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
4170 dcb_info->tc_queue.tc_rxq[0][i].base =
4171 hw->tc_queue[i].tqp_offset;
4172 dcb_info->tc_queue.tc_txq[0][i].base =
4173 hw->tc_queue[i].tqp_offset;
4174 dcb_info->tc_queue.tc_rxq[0][i].nb_queue =
4175 hw->tc_queue[i].tqp_count;
4176 dcb_info->tc_queue.tc_txq[0][i].nb_queue =
4177 hw->tc_queue[i].tqp_count;
4179 rte_spinlock_unlock(&hw->lock);
4184 static const struct eth_dev_ops hns3_eth_dev_ops = {
4185 .dev_start = hns3_dev_start,
4186 .dev_stop = hns3_dev_stop,
4187 .dev_close = hns3_dev_close,
4188 .promiscuous_enable = hns3_dev_promiscuous_enable,
4189 .promiscuous_disable = hns3_dev_promiscuous_disable,
4190 .allmulticast_enable = hns3_dev_allmulticast_enable,
4191 .allmulticast_disable = hns3_dev_allmulticast_disable,
4192 .mtu_set = hns3_dev_mtu_set,
4193 .dev_infos_get = hns3_dev_infos_get,
4194 .fw_version_get = hns3_fw_version_get,
4195 .rx_queue_setup = hns3_rx_queue_setup,
4196 .tx_queue_setup = hns3_tx_queue_setup,
4197 .rx_queue_release = hns3_dev_rx_queue_release,
4198 .tx_queue_release = hns3_dev_tx_queue_release,
4199 .dev_configure = hns3_dev_configure,
4200 .flow_ctrl_get = hns3_flow_ctrl_get,
4201 .flow_ctrl_set = hns3_flow_ctrl_set,
4202 .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
4203 .mac_addr_add = hns3_add_mac_addr,
4204 .mac_addr_remove = hns3_remove_mac_addr,
4205 .mac_addr_set = hns3_set_default_mac_addr,
4206 .set_mc_addr_list = hns3_set_mc_mac_addr_list,
4207 .link_update = hns3_dev_link_update,
4208 .rss_hash_update = hns3_dev_rss_hash_update,
4209 .rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
4210 .reta_update = hns3_dev_rss_reta_update,
4211 .reta_query = hns3_dev_rss_reta_query,
4212 .filter_ctrl = hns3_dev_filter_ctrl,
4213 .vlan_filter_set = hns3_vlan_filter_set,
4214 .vlan_tpid_set = hns3_vlan_tpid_set,
4215 .vlan_offload_set = hns3_vlan_offload_set,
4216 .vlan_pvid_set = hns3_vlan_pvid_set,
4217 .get_reg = hns3_get_regs,
4218 .get_dcb_info = hns3_get_dcb_info,
4219 .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
4223 hns3_dev_init(struct rte_eth_dev *eth_dev)
4225 struct rte_device *dev = eth_dev->device;
4226 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
4227 struct hns3_adapter *hns = eth_dev->data->dev_private;
4228 struct hns3_hw *hw = &hns->hw;
4229 uint16_t device_id = pci_dev->id.device_id;
4232 PMD_INIT_FUNC_TRACE();
4233 eth_dev->process_private = (struct hns3_process_private *)
4234 rte_zmalloc_socket("hns3_filter_list",
4235 sizeof(struct hns3_process_private),
4236 RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
4237 if (eth_dev->process_private == NULL) {
4238 PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
4241 /* initialize flow filter lists */
4242 hns3_filterlist_init(eth_dev);
4244 hns3_set_rxtx_function(eth_dev);
4245 eth_dev->dev_ops = &hns3_eth_dev_ops;
4246 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4249 hw->adapter_state = HNS3_NIC_UNINITIALIZED;
4251 if (device_id == HNS3_DEV_ID_25GE_RDMA ||
4252 device_id == HNS3_DEV_ID_50GE_RDMA ||
4253 device_id == HNS3_DEV_ID_100G_RDMA_MACSEC)
4254 hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1);
4257 hw->data = eth_dev->data;
4260 * Set default max packet size according to the mtu
4261 * default vale in DPDK frame.
4263 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
4265 ret = hns3_init_pf(eth_dev);
4267 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
4271 /* Allocate memory for storing MAC addresses */
4272 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
4273 sizeof(struct rte_ether_addr) *
4274 HNS3_UC_MACADDR_NUM, 0);
4275 if (eth_dev->data->mac_addrs == NULL) {
4276 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
4277 "to store MAC addresses",
4278 sizeof(struct rte_ether_addr) *
4279 HNS3_UC_MACADDR_NUM);
4281 goto err_rte_zmalloc;
4284 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
4285 ð_dev->data->mac_addrs[0]);
4287 hw->adapter_state = HNS3_NIC_INITIALIZED;
4289 * Pass the information to the rte_eth_dev_close() that it should also
4290 * release the private port resources.
4292 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
4294 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
4295 hns3_info(hw, "hns3 dev initialization successful!");
4299 hns3_uninit_pf(eth_dev);
4302 eth_dev->dev_ops = NULL;
4303 eth_dev->rx_pkt_burst = NULL;
4304 eth_dev->tx_pkt_burst = NULL;
4305 eth_dev->tx_pkt_prepare = NULL;
4306 rte_free(eth_dev->process_private);
4307 eth_dev->process_private = NULL;
4312 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
4314 struct hns3_adapter *hns = eth_dev->data->dev_private;
4315 struct hns3_hw *hw = &hns->hw;
4317 PMD_INIT_FUNC_TRACE();
4319 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4322 eth_dev->dev_ops = NULL;
4323 eth_dev->rx_pkt_burst = NULL;
4324 eth_dev->tx_pkt_burst = NULL;
4325 eth_dev->tx_pkt_prepare = NULL;
4326 if (hw->adapter_state < HNS3_NIC_CLOSING)
4327 hns3_dev_close(eth_dev);
4329 hw->adapter_state = HNS3_NIC_REMOVED;
4334 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4335 struct rte_pci_device *pci_dev)
4337 return rte_eth_dev_pci_generic_probe(pci_dev,
4338 sizeof(struct hns3_adapter),
4343 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
4345 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
4348 static const struct rte_pci_id pci_id_hns3_map[] = {
4349 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
4350 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
4351 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
4352 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
4353 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
4354 { .vendor_id = 0, /* sentinel */ },
4357 static struct rte_pci_driver rte_hns3_pmd = {
4358 .id_table = pci_id_hns3_map,
4359 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
4360 .probe = eth_hns3_pci_probe,
4361 .remove = eth_hns3_pci_remove,
/* Register the PMD, its PCI ID table and kernel-module dependencies. */
RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
4368 RTE_INIT(hns3_init_log)
4370 hns3_logtype_init = rte_log_register("pmd.net.hns3.init");
4371 if (hns3_logtype_init >= 0)
4372 rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE);
4373 hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver");
4374 if (hns3_logtype_driver >= 0)
4375 rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE);