1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
12 #include <rte_bus_pci.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
24 #include "hns3_ethdev.h"
25 #include "hns3_logs.h"
26 #include "hns3_regs.h"
28 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32
29 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
31 int hns3_logtype_init;
32 int hns3_logtype_driver;
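/* Configure the minimum and maximum TSO MSS supported by hardware
 * through the TSO_GENERIC_CONFIG firmware command.
 */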
35 hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,
36 unsigned int tso_mss_max)
38 struct hns3_cfg_tso_status_cmd *req;
39 struct hns3_cmd_desc desc;
42 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);
44 req = (struct hns3_cfg_tso_status_cmd *)desc.data;
47 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
49 req->tso_mss_min = rte_cpu_to_le_16(tso_mss);
52 hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,
54 req->tso_mss_max = rte_cpu_to_le_16(tso_mss);
56 return hns3_cmd_send(hw, &desc, 1);
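/* Enable or disable hardware GRO through the GRO_GENERIC_CONFIG firmware command. */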
60 hns3_config_gro(struct hns3_hw *hw, bool en)
62 struct hns3_cfg_gro_status_cmd *req;
63 struct hns3_cmd_desc desc;
66 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);
67 req = (struct hns3_cfg_gro_status_cmd *)desc.data;
69 req->gro_en = rte_cpu_to_le_16(en ? 1 : 0);
71 ret = hns3_cmd_send(hw, &desc, 1);
73 hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret);
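/* Allocate or free unicast MAC VLAN (UMV) table space for this function;
 * when allocating, the size actually granted is returned in *allocated_size.
 */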
79 hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,
80 uint16_t *allocated_size, bool is_alloc)
82 struct hns3_umv_spc_alc_cmd *req;
83 struct hns3_cmd_desc desc;
86 req = (struct hns3_umv_spc_alc_cmd *)desc.data;
87 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);
88 hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);
89 req->space_size = rte_cpu_to_le_32(space_size);
91 ret = hns3_cmd_send(hw, &desc, 1);
93 PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d",
94 is_alloc ? "allocate" : "free", ret);
98 if (is_alloc && allocated_size)
99 *allocated_size = rte_le_to_cpu_32(desc.data[1]);
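/* Request the wanted UMV space for the PF and record the size actually granted. */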
105 hns3_init_umv_space(struct hns3_hw *hw)
107 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
108 struct hns3_pf *pf = &hns->pf;
109 uint16_t allocated_size = 0;
112 ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,
117 if (allocated_size < pf->wanted_umv_size)
118 PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u",
119 pf->wanted_umv_size, allocated_size);
121 pf->max_umv_size = (!!allocated_size) ? allocated_size :
123 pf->used_umv_size = 0;
128 hns3_uninit_umv_space(struct hns3_hw *hw)
130 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
131 struct hns3_pf *pf = &hns->pf;
134 if (pf->max_umv_size == 0)
137 ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);
141 pf->max_umv_size = 0;
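/* Program the maximum (and minimum) frame size into the MAC through firmware. */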
147 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
149 struct hns3_config_max_frm_size_cmd *req;
150 struct hns3_cmd_desc desc;
152 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);
154 req = (struct hns3_config_max_frm_size_cmd *)desc.data;
155 req->max_frm_size = rte_cpu_to_le_16(new_mps);
156 req->min_frm_size = HNS3_MIN_FRAME_LEN;
158 return hns3_cmd_send(hw, &desc, 1);
162 hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)
166 ret = hns3_set_mac_mtu(hw, mps);
168 hns3_err(hw, "Failed to set mtu, ret = %d", ret);
172 ret = hns3_buffer_alloc(hw);
174 hns3_err(hw, "Failed to allocate buffer, ret = %d", ret);
182 hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)
184 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
185 struct hns3_pf *pf = &hns->pf;
187 if (!(status->pf_state & HNS3_PF_STATE_DONE))
190 pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;
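/* Poll the function status until PF reset is done (or the query times out),
 * then record whether this function is the main PF.
 */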
196 hns3_query_function_status(struct hns3_hw *hw)
198 #define HNS3_QUERY_MAX_CNT 10
199 #define HNS3_QUERY_SLEEP_MSECOND 1
200 struct hns3_func_status_cmd *req;
201 struct hns3_cmd_desc desc;
205 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);
206 req = (struct hns3_func_status_cmd *)desc.data;
209 ret = hns3_cmd_send(hw, &desc, 1);
211 PMD_INIT_LOG(ERR, "query function status failed %d",
216 /* Check whether PF reset is done */
220 rte_delay_ms(HNS3_QUERY_SLEEP_MSECOND);
221 } while (timeout++ < HNS3_QUERY_MAX_CNT);
223 return hns3_parse_func_status(hw, req);
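/* Query PF resources from firmware: total TQP number, packet buffer size,
 * TX/DV buffer sizes and the number of PF interrupt vectors.
 */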
227 hns3_query_pf_resource(struct hns3_hw *hw)
229 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
230 struct hns3_pf *pf = &hns->pf;
231 struct hns3_pf_res_cmd *req;
232 struct hns3_cmd_desc desc;
235 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
236 ret = hns3_cmd_send(hw, &desc, 1);
238 PMD_INIT_LOG(ERR, "query pf resource failed %d", ret);
242 req = (struct hns3_pf_res_cmd *)desc.data;
243 hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
244 pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
245 hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
247 if (req->tx_buf_size)
249 rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;
251 pf->tx_buf_size = HNS3_DEFAULT_TX_BUF;
253 pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);
255 if (req->dv_buf_size)
257 rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;
259 pf->dv_buf_size = HNS3_DEFAULT_DV;
261 pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
264 hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
265 HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
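/* Extract the board configuration fields (TC/queue numbers, MAC address,
 * media type, default speed, RSS size, UMV space, etc.) from the raw
 * GET_CFG_PARAM descriptors.
 */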
271 hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)
273 struct hns3_cfg_param_cmd *req;
274 uint64_t mac_addr_tmp_high;
275 uint64_t mac_addr_tmp;
278 req = (struct hns3_cfg_param_cmd *)desc[0].data;
280 /* get the configuration */
281 cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
282 HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);
283 cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
284 HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);
285 cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),
286 HNS3_CFG_TQP_DESC_N_M,
287 HNS3_CFG_TQP_DESC_N_S);
289 cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
291 HNS3_CFG_PHY_ADDR_S);
292 cfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
294 HNS3_CFG_MEDIA_TP_S);
295 cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
296 HNS3_CFG_RX_BUF_LEN_M,
297 HNS3_CFG_RX_BUF_LEN_S);
298 /* get mac address */
299 mac_addr_tmp = rte_le_to_cpu_32(req->param[2]);
300 mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
301 HNS3_CFG_MAC_ADDR_H_M,
302 HNS3_CFG_MAC_ADDR_H_S);
304 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
306 cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
307 HNS3_CFG_DEFAULT_SPEED_M,
308 HNS3_CFG_DEFAULT_SPEED_S);
309 cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),
311 HNS3_CFG_RSS_SIZE_S);
313 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
314 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
316 req = (struct hns3_cfg_param_cmd *)desc[1].data;
317 cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);
319 cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
320 HNS3_CFG_SPEED_ABILITY_M,
321 HNS3_CFG_SPEED_ABILITY_S);
322 cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),
323 HNS3_CFG_UMV_TBL_SPACE_M,
324 HNS3_CFG_UMV_TBL_SPACE_S);
326 cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;
329 /* hns3_get_board_cfg: query the static parameters from the NCL_config file in flash
330  * @hw: pointer to struct hns3_hw
331  * @hcfg: the config structure to be filled in
334 hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)
336 struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];
337 struct hns3_cfg_param_cmd *req;
342 for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {
344 req = (struct hns3_cfg_param_cmd *)desc[i].data;
345 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,
347 hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,
348 i * HNS3_CFG_RD_LEN_BYTES);
349 /* Len should be divided by 4 when sent to hardware */
350 hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,
351 HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);
352 req->offset = rte_cpu_to_le_32(offset);
355 ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM);
357 PMD_INIT_LOG(ERR, "get config failed %d.", ret);
361 hns3_parse_cfg(hcfg, desc);
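/* Translate the firmware speed encoding into an ETH_SPEED_NUM_* value. */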
367 hns3_parse_speed(int speed_cmd, uint32_t *speed)
370 case HNS3_CFG_SPEED_10M:
371 *speed = ETH_SPEED_NUM_10M;
373 case HNS3_CFG_SPEED_100M:
374 *speed = ETH_SPEED_NUM_100M;
376 case HNS3_CFG_SPEED_1G:
377 *speed = ETH_SPEED_NUM_1G;
379 case HNS3_CFG_SPEED_10G:
380 *speed = ETH_SPEED_NUM_10G;
382 case HNS3_CFG_SPEED_25G:
383 *speed = ETH_SPEED_NUM_25G;
385 case HNS3_CFG_SPEED_40G:
386 *speed = ETH_SPEED_NUM_40G;
388 case HNS3_CFG_SPEED_50G:
389 *speed = ETH_SPEED_NUM_50G;
391 case HNS3_CFG_SPEED_100G:
392 *speed = ETH_SPEED_NUM_100G;
402 hns3_get_board_configuration(struct hns3_hw *hw)
404 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
405 struct hns3_pf *pf = &hns->pf;
409 ret = hns3_get_board_cfg(hw, &cfg);
411 PMD_INIT_LOG(ERR, "get board config failed %d", ret);
415 if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) {
416 PMD_INIT_LOG(ERR, "media type is copper, not supported.");
420 hw->mac.media_type = cfg.media_type;
421 hw->rss_size_max = cfg.rss_size_max;
422 hw->rx_buf_len = cfg.rx_buf_len;
423 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);
424 hw->mac.phy_addr = cfg.phy_addr;
425 hw->mac.default_addr_setted = false;
426 hw->num_tx_desc = cfg.tqp_desc_num;
427 hw->num_rx_desc = cfg.tqp_desc_num;
428 hw->dcb_info.num_pg = 1;
429 hw->dcb_info.hw_pfc_map = 0;
431 ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
433 PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
434 cfg.default_speed, ret);
438 pf->tc_max = cfg.tc_num;
439 if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {
440 PMD_INIT_LOG(WARNING,
441 "Get TC num(%u) from flash, set TC num to 1",
446 /* Dev does not support DCB */
447 if (!hns3_dev_dcb_supported(hw)) {
451 pf->pfc_max = pf->tc_max;
453 hw->dcb_info.num_tc = 1;
454 hw->alloc_rss_size = RTE_MIN(hw->rss_size_max,
455 hw->tqps_num / hw->dcb_info.num_tc);
456 hns3_set_bit(hw->hw_tc_map, 0, 1);
457 pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;
459 pf->wanted_umv_size = cfg.umv_space;
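/* Fetch the whole configuration: function status, PF resources and board
 * configuration, in that order.
 */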
465 hns3_get_configuration(struct hns3_hw *hw)
469 ret = hns3_query_function_status(hw);
471 PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret);
475 /* Get pf resource */
476 ret = hns3_query_pf_resource(hw);
478 PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret);
482 ret = hns3_get_board_configuration(hw);
484 PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
492 hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,
493 uint16_t tqp_vid, bool is_pf)
495 struct hns3_tqp_map_cmd *req;
496 struct hns3_cmd_desc desc;
499 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);
501 req = (struct hns3_tqp_map_cmd *)desc.data;
502 req->tqp_id = rte_cpu_to_le_16(tqp_pid);
503 req->tqp_vf = func_id;
504 req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;
506 req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);
507 req->tqp_vid = rte_cpu_to_le_16(tqp_vid);
509 ret = hns3_cmd_send(hw, &desc, 1);
511 PMD_INIT_LOG(ERR, "TQP map failed %d", ret);
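/* Map every hardware TQP to the PF. VF is not supported in the current
 * version, so the PF takes all TQPs.
 */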
517 hns3_map_tqp(struct hns3_hw *hw)
519 uint16_t tqps_num = hw->total_tqps_num;
527 * In the current version VF is not supported when the PF is driven by the
528 * DPDK driver, so we allocate as many TQPs to the PF as possible.
531 num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
532 for (func_id = 0; func_id < num; func_id++) {
534 i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
535 ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
546 hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)
548 struct hns3_config_mac_speed_dup_cmd *req;
549 struct hns3_cmd_desc desc;
552 req = (struct hns3_config_mac_speed_dup_cmd *)desc.data;
554 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);
556 hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);
559 case ETH_SPEED_NUM_10M:
560 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
561 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);
563 case ETH_SPEED_NUM_100M:
564 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
565 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);
567 case ETH_SPEED_NUM_1G:
568 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
569 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);
571 case ETH_SPEED_NUM_10G:
572 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
573 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);
575 case ETH_SPEED_NUM_25G:
576 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
577 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);
579 case ETH_SPEED_NUM_40G:
580 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
581 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);
583 case ETH_SPEED_NUM_50G:
584 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
585 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);
587 case ETH_SPEED_NUM_100G:
588 hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,
589 HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);
592 PMD_INIT_LOG(ERR, "invalid speed (%u)", speed);
596 hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);
598 ret = hns3_cmd_send(hw, &desc, 1);
600 PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret);
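/* Assign a TX buffer of pf->tx_buf_size to every enabled TC, provided the
 * total packet buffer is large enough.
 */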
606 hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
608 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
609 struct hns3_pf *pf = &hns->pf;
610 struct hns3_priv_buf *priv;
611 uint32_t i, total_size;
613 total_size = pf->pkt_buf_size;
615 /* Allocate a tx buffer for each enabled TC */
616 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
617 priv = &buf_alloc->priv_buf[i];
619 if (hw->hw_tc_map & BIT(i)) {
620 if (total_size < pf->tx_buf_size)
623 priv->tx_buf_size = pf->tx_buf_size;
625 priv->tx_buf_size = 0;
627 total_size -= priv->tx_buf_size;
634 hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
636 /* TX buffer size is in units of 128 bytes */
637 #define HNS3_BUF_SIZE_UNIT_SHIFT 7
638 #define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15)
639 struct hns3_tx_buff_alloc_cmd *req;
640 struct hns3_cmd_desc desc;
645 req = (struct hns3_tx_buff_alloc_cmd *)desc.data;
647 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);
648 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
649 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
651 buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;
652 req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |
653 HNS3_BUF_SIZE_UPDATE_EN_MSK);
656 ret = hns3_cmd_send(hw, &desc, 1);
658 PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret);
664 hns3_get_tc_num(struct hns3_hw *hw)
669 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
670 if (hw->hw_tc_map & BIT(i))
676 hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
678 struct hns3_priv_buf *priv;
679 uint32_t rx_priv = 0;
682 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
683 priv = &buf_alloc->priv_buf[i];
685 rx_priv += priv->buf_size;
691 hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)
693 uint32_t total_tx_size = 0;
696 for (i = 0; i < HNS3_MAX_TC_NUM; i++)
697 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
699 return total_tx_size;
702 /* Get the number of PFC-enabled TCs that have a private buffer */
704 hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
706 struct hns3_priv_buf *priv;
710 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
711 priv = &buf_alloc->priv_buf[i];
712 if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
719 /* Get the number of PFC-disabled TCs that have a private buffer */
721 hns3_get_no_pfc_priv_num(struct hns3_hw *hw,
722 struct hns3_pkt_buf_alloc *buf_alloc)
724 struct hns3_priv_buf *priv;
728 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
729 priv = &buf_alloc->priv_buf[i];
730 if (hw->hw_tc_map & BIT(i) &&
731 !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)
739 hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
742 uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
743 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
744 struct hns3_pf *pf = &hns->pf;
745 uint32_t shared_buf, aligned_mps;
750 tc_num = hns3_get_tc_num(hw);
751 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
753 if (hns3_dev_dcb_supported(hw))
754 shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
757 shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF
760 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
761 shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc),
764 rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
765 if (rx_all < rx_priv + shared_std)
768 shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
769 buf_alloc->s_buf.buf_size = shared_buf;
770 if (hns3_dev_dcb_supported(hw)) {
771 buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
772 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
773 - roundup(aligned_mps / HNS3_BUF_DIV_BY,
776 buf_alloc->s_buf.self.high =
777 aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
778 buf_alloc->s_buf.self.low = aligned_mps;
781 if (hns3_dev_dcb_supported(hw)) {
782 hi_thrd = shared_buf - pf->dv_buf_size;
784 if (tc_num <= NEED_RESERVE_TC_NUM)
785 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
789 hi_thrd = hi_thrd / tc_num;
791 hi_thrd = max_t(uint32_t, hi_thrd,
792 HNS3_BUF_MUL_BY * aligned_mps);
793 hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
794 lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
796 hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;
797 lo_thrd = aligned_mps;
800 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
801 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
802 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
809 hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,
810 struct hns3_pkt_buf_alloc *buf_alloc)
812 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
813 struct hns3_pf *pf = &hns->pf;
814 struct hns3_priv_buf *priv;
815 uint32_t aligned_mps;
819 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
820 aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
822 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
823 priv = &buf_alloc->priv_buf[i];
830 if (!(hw->hw_tc_map & BIT(i)))
834 if (hw->dcb_info.hw_pfc_map & BIT(i)) {
835 priv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;
836 priv->wl.high = roundup(priv->wl.low + aligned_mps,
840 priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) :
844 priv->buf_size = priv->wl.high + pf->dv_buf_size;
847 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
851 hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,
852 struct hns3_pkt_buf_alloc *buf_alloc)
854 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
855 struct hns3_pf *pf = &hns->pf;
856 struct hns3_priv_buf *priv;
862 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
863 no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);
865 /* clear the last (highest-numbered) TC first */
866 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
867 priv = &buf_alloc->priv_buf[i];
868 mask = BIT((uint8_t)i);
870 if (hw->hw_tc_map & mask &&
871 !(hw->dcb_info.hw_pfc_map & mask)) {
872 /* Clear the private buffer of this non-PFC TC */
880 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
881 no_pfc_priv_num == 0)
885 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
889 hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
890 struct hns3_pkt_buf_alloc *buf_alloc)
892 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
893 struct hns3_pf *pf = &hns->pf;
894 struct hns3_priv_buf *priv;
900 rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
901 pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);
903 /* clear the last (highest-numbered) TC first */
904 for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
905 priv = &buf_alloc->priv_buf[i];
906 mask = BIT((uint8_t)i);
908 if (hw->hw_tc_map & mask &&
909 hw->dcb_info.hw_pfc_map & mask) {
910 /* Reduce the number of PFC-enabled TCs with a private buffer */
917 if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||
922 return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);
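/* Try to satisfy the RX demand with private per-TC buffers only, leaving the
 * shared buffer empty.
 */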
926 hns3_only_alloc_priv_buff(struct hns3_hw *hw,
927 struct hns3_pkt_buf_alloc *buf_alloc)
929 #define COMPENSATE_BUFFER 0x3C00
930 #define COMPENSATE_HALF_MPS_NUM 5
931 #define PRIV_WL_GAP 0x1800
932 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
933 struct hns3_pf *pf = &hns->pf;
934 uint32_t tc_num = hns3_get_tc_num(hw);
935 uint32_t half_mps = pf->mps >> 1;
936 struct hns3_priv_buf *priv;
937 uint32_t min_rx_priv;
941 rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);
943 rx_priv = rx_priv / tc_num;
945 if (tc_num <= NEED_RESERVE_TC_NUM)
946 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
949 * The minimum value of the private buffer in the rx direction (min_rx_priv)
950 * is "DV + 2.5 * MPS + 15KB". The driver only allocates rx private
951 * buffers if rx_priv is no less than min_rx_priv.
953 min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
954 COMPENSATE_HALF_MPS_NUM * half_mps;
955 min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
956 rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
958 if (rx_priv < min_rx_priv)
961 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
962 priv = &buf_alloc->priv_buf[i];
969 if (!(hw->hw_tc_map & BIT(i)))
973 priv->buf_size = rx_priv;
974 priv->wl.high = rx_priv - pf->dv_buf_size;
975 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
978 buf_alloc->s_buf.buf_size = 0;
984 * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
985 * @hw: pointer to struct hns3_hw
986 * @buf_alloc: pointer to buffer calculation data
987 * @return: 0 on successful calculation, negative on failure
990 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
992 /* When DCB is not supported, rx private buffer is not allocated. */
993 if (!hns3_dev_dcb_supported(hw)) {
994 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
995 struct hns3_pf *pf = &hns->pf;
996 uint32_t rx_all = pf->pkt_buf_size;
998 rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
999 if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
1006 * Try to allocate private packet buffers for all TCs without a shared
1009 if (hns3_only_alloc_priv_buff(hw, buf_alloc))
1013 * Try to allocate private packet buffers for all TCs with a shared
1016 if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
1020 * For different application scenarios, the enabled port number, TC number
1021 * and no_drop TC number are different. In order to obtain better
1022 * performance, software can allocate the buffer size and configure
1023 * the waterline by trying to decrease the private buffer size in
1024 * the following order: waterline of valid tc, pfc disabled tc, pfc
1027 if (hns3_rx_buf_calc_all(hw, false, buf_alloc))
1030 if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))
1033 if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))
1040 hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
1042 struct hns3_rx_priv_buff_cmd *req;
1043 struct hns3_cmd_desc desc;
1048 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);
1049 req = (struct hns3_rx_priv_buff_cmd *)desc.data;
1051 /* Allocate the private buffer for each TC */
1052 for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
1053 struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];
1056 rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);
1057 req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);
1060 buf_size = buf_alloc->s_buf.buf_size;
1061 req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |
1062 (1 << HNS3_TC0_PRI_BUF_EN_B));
1064 ret = hns3_cmd_send(hw, &desc, 1);
1066 PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret);
1072 hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
1074 #define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2
1075 struct hns3_rx_priv_wl_buf *req;
1076 struct hns3_priv_buf *priv;
1077 struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];
1081 for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {
1082 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,
1084 req = (struct hns3_rx_priv_wl_buf *)desc[i].data;
1086 /* The first descriptor sets the NEXT bit to 1 */
1088 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1090 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1092 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
1093 uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;
1095 priv = &buf_alloc->priv_buf[idx];
1096 req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>
1098 req->tc_wl[j].high |=
1099 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1100 req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>
1102 req->tc_wl[j].low |=
1103 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1107 /* Send 2 descriptors at one time */
1108 ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);
1110 PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d",
1116 hns3_common_thrd_config(struct hns3_hw *hw,
1117 struct hns3_pkt_buf_alloc *buf_alloc)
1119 #define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2
1120 struct hns3_shared_buf *s_buf = &buf_alloc->s_buf;
1121 struct hns3_rx_com_thrd *req;
1122 struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];
1123 struct hns3_tc_thrd *tc;
1128 for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {
1129 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,
1131 req = (struct hns3_rx_com_thrd *)&desc[i].data;
1133 /* The first descriptor sets the NEXT bit to 1 */
1135 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1137 desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
1139 for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {
1140 tc_idx = i * HNS3_TC_NUM_ONE_DESC + j;
1141 tc = &s_buf->tc_thrd[tc_idx];
1143 req->com_thrd[j].high =
1144 rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);
1145 req->com_thrd[j].high |=
1146 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1147 req->com_thrd[j].low =
1148 rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);
1149 req->com_thrd[j].low |=
1150 rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1154 /* Send 2 descriptors at one time */
1155 ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);
1157 PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret);
1163 hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
1165 struct hns3_shared_buf *buf = &buf_alloc->s_buf;
1166 struct hns3_rx_com_wl *req;
1167 struct hns3_cmd_desc desc;
1170 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);
1172 req = (struct hns3_rx_com_wl *)desc.data;
1173 req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);
1174 req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1176 req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);
1177 req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));
1179 ret = hns3_cmd_send(hw, &desc, 1);
1181 PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret);
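/* Calculate and program the TX/RX packet buffers and, when DCB is supported,
 * the per-TC waterlines and thresholds.
 */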
1187 hns3_buffer_alloc(struct hns3_hw *hw)
1189 struct hns3_pkt_buf_alloc pkt_buf;
1192 memset(&pkt_buf, 0, sizeof(pkt_buf));
1193 ret = hns3_tx_buffer_calc(hw, &pkt_buf);
1196 "could not calc tx buffer size for all TCs %d",
1201 ret = hns3_tx_buffer_alloc(hw, &pkt_buf);
1203 PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret);
1207 ret = hns3_rx_buffer_calc(hw, &pkt_buf);
1210 "could not calc rx priv buffer size for all TCs %d",
1215 ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);
1217 PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret);
1221 if (hns3_dev_dcb_supported(hw)) {
1222 ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
1225 "could not configure rx private waterline %d",
1230 ret = hns3_common_thrd_config(hw, &pkt_buf);
1233 "could not configure common threshold %d",
1239 ret = hns3_common_wl_config(hw, &pkt_buf);
1241 PMD_INIT_LOG(ERR, "could not configure common waterline %d",
1248 hns3_mac_init(struct hns3_hw *hw)
1250 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1251 struct hns3_mac *mac = &hw->mac;
1252 struct hns3_pf *pf = &hns->pf;
1255 pf->support_sfp_query = true;
1256 mac->link_duplex = ETH_LINK_FULL_DUPLEX;
1257 ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex);
1259 PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret);
1263 mac->link_status = ETH_LINK_DOWN;
1265 return hns3_config_mtu(hw, pf->mps);
1269 hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
1271 #define HNS3_ETHERTYPE_SUCCESS_ADD 0
1272 #define HNS3_ETHERTYPE_ALREADY_ADD 1
1273 #define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2
1274 #define HNS3_ETHERTYPE_KEY_CONFLICT 3
1279 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
1284 switch (resp_code) {
1285 case HNS3_ETHERTYPE_SUCCESS_ADD:
1286 case HNS3_ETHERTYPE_ALREADY_ADD:
1289 case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:
1291 "add mac ethertype failed for manager table overflow.");
1292 return_status = -EIO;
1294 case HNS3_ETHERTYPE_KEY_CONFLICT:
1295 PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict.");
1296 return_status = -EIO;
1300 "add mac ethertype failed for undefined, code=%d.",
1302 return_status = -EIO;
1305 return return_status;
1309 hns3_add_mgr_tbl(struct hns3_hw *hw,
1310 const struct hns3_mac_mgr_tbl_entry_cmd *req)
1312 struct hns3_cmd_desc desc;
1317 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);
1318 memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));
1320 ret = hns3_cmd_send(hw, &desc, 1);
1323 "add mac ethertype failed for cmd_send, ret =%d.",
1328 resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;
1329 retval = rte_le_to_cpu_16(desc.retval);
1331 return hns3_get_mac_ethertype_cmd_status(retval, resp_code);
1335 hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,
1336 int *table_item_num)
1338 struct hns3_mac_mgr_tbl_entry_cmd *tbl;
1341 * In the current version, we add one entry to the management table, as below:
1342 * 0x0180C200000E -- LLDP MC address
1345 tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;
1346 tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);
1347 tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));
1348 tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));
1349 tbl->i_port_bitmap = 0x1;
1350 *table_item_num = 1;
1354 hns3_init_mgr_tbl(struct hns3_hw *hw)
1356 #define HNS_MAC_MGR_TBL_MAX_SIZE 16
1357 struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];
1362 memset(mgr_table, 0, sizeof(mgr_table));
1363 hns3_prepare_mgr_tbl(mgr_table, &table_item_num);
1364 for (i = 0; i < table_item_num; i++) {
1365 ret = hns3_add_mgr_tbl(hw, &mgr_table[i]);
1367 PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d",
1377 hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,
1378 bool en_mc, bool en_bc, int vport_id)
1383 memset(param, 0, sizeof(struct hns3_promisc_param));
1385 param->enable = HNS3_PROMISC_EN_UC;
1387 param->enable |= HNS3_PROMISC_EN_MC;
1389 param->enable |= HNS3_PROMISC_EN_BC;
1390 param->vf_id = vport_id;
1394 hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)
1396 struct hns3_promisc_cfg_cmd *req;
1397 struct hns3_cmd_desc desc;
1400 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);
1402 req = (struct hns3_promisc_cfg_cmd *)desc.data;
1403 req->vf_id = param->vf_id;
1404 req->flag = (param->enable << HNS3_PROMISC_EN_B) |
1405 HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;
1407 ret = hns3_cmd_send(hw, &desc, 1);
1409 PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
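/* Configure unicast/multicast promiscuous mode for the PF (vf_id 0);
 * broadcast promiscuous mode is always enabled.
 */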
1415 hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
1417 struct hns3_promisc_param param;
1418 bool en_bc_pmc = true;
1423 * In the current version VF is not supported when the PF is driven by the
1424 * DPDK driver; the PF-related vf_id is 0, so we only need to configure parameters
1429 hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);
1430 ret = hns3_cmd_set_promisc_mode(hw, ¶m);
1438 hns3_init_hardware(struct hns3_adapter *hns)
1440 struct hns3_hw *hw = &hns->hw;
1443 ret = hns3_map_tqp(hw);
1445 PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret);
1449 ret = hns3_init_umv_space(hw);
1451 PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret);
1455 ret = hns3_mac_init(hw);
1457 PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret);
1461 ret = hns3_init_mgr_tbl(hw);
1463 PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret);
1467 ret = hns3_set_promisc_mode(hw, false, false);
1469 PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret);
1473 ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
1475 PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
1479 ret = hns3_config_gro(hw, false);
1481 PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1487 hns3_uninit_umv_space(hw);
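/* PF initialization: map the BAR2 register space, bring up the firmware
 * command queue, fetch the configuration and initialize the hardware.
 */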
1492 hns3_init_pf(struct rte_eth_dev *eth_dev)
1494 struct rte_device *dev = eth_dev->device;
1495 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
1496 struct hns3_adapter *hns = eth_dev->data->dev_private;
1497 struct hns3_hw *hw = &hns->hw;
1500 PMD_INIT_FUNC_TRACE();
1502 /* Get hardware io base address from pcie BAR2 IO space */
1503 hw->io_base = pci_dev->mem_resource[2].addr;
1505 /* Firmware command queue initialize */
1506 ret = hns3_cmd_init_queue(hw);
1508 PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1509 goto err_cmd_init_queue;
1512 /* Firmware command initialize */
1513 ret = hns3_cmd_init(hw);
1515 PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1519 /* Get configuration */
1520 ret = hns3_get_configuration(hw);
1522 PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1523 goto err_get_config;
1526 ret = hns3_init_hardware(hns);
1528 PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
1529 goto err_get_config;
1535 hns3_cmd_uninit(hw);
1538 hns3_cmd_destroy_queue(hw);
1547 hns3_uninit_pf(struct rte_eth_dev *eth_dev)
1549 struct hns3_adapter *hns = eth_dev->data->dev_private;
1550 struct hns3_hw *hw = &hns->hw;
1552 PMD_INIT_FUNC_TRACE();
1554 hns3_uninit_umv_space(hw);
1555 hns3_cmd_uninit(hw);
1556 hns3_cmd_destroy_queue(hw);
1561 hns3_dev_close(struct rte_eth_dev *eth_dev)
1563 struct hns3_adapter *hns = eth_dev->data->dev_private;
1564 struct hns3_hw *hw = &hns->hw;
1566 hw->adapter_state = HNS3_NIC_CLOSING;
1567 hns3_uninit_pf(eth_dev);
1568 hw->adapter_state = HNS3_NIC_CLOSED;
1571 static const struct eth_dev_ops hns3_eth_dev_ops = {
1572 .dev_close = hns3_dev_close,
1576 hns3_dev_init(struct rte_eth_dev *eth_dev)
1578 struct hns3_adapter *hns = eth_dev->data->dev_private;
1579 struct hns3_hw *hw = &hns->hw;
1582 PMD_INIT_FUNC_TRACE();
1584 eth_dev->dev_ops = &hns3_eth_dev_ops;
1585 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1588 hw->adapter_state = HNS3_NIC_UNINITIALIZED;
1590 hw->data = eth_dev->data;
1593 * Set the default max packet size according to the default MTU
1594 * value in the DPDK framework.
1596 hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD;
1598 ret = hns3_init_pf(eth_dev);
1600 PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
1604 /* Allocate memory for storing MAC addresses */
1605 eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac",
1606 sizeof(struct rte_ether_addr) *
1607 HNS3_UC_MACADDR_NUM, 0);
1608 if (eth_dev->data->mac_addrs == NULL) {
1609 PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed "
1610 "to store MAC addresses",
1611 sizeof(struct rte_ether_addr) *
1612 HNS3_UC_MACADDR_NUM);
1614 goto err_rte_zmalloc;
1617 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
1618 ð_dev->data->mac_addrs[0]);
1620 hw->adapter_state = HNS3_NIC_INITIALIZED;
1622 * Pass the information to the rte_eth_dev_close() that it should also
1623 * release the private port resources.
1625 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1627 hns3_info(hw, "hns3 dev initialization successful!");
1631 hns3_uninit_pf(eth_dev);
1634 eth_dev->dev_ops = NULL;
1635 eth_dev->rx_pkt_burst = NULL;
1636 eth_dev->tx_pkt_burst = NULL;
1637 eth_dev->tx_pkt_prepare = NULL;
1642 hns3_dev_uninit(struct rte_eth_dev *eth_dev)
1644 struct hns3_adapter *hns = eth_dev->data->dev_private;
1645 struct hns3_hw *hw = &hns->hw;
1647 PMD_INIT_FUNC_TRACE();
1649 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1652 eth_dev->dev_ops = NULL;
1653 eth_dev->rx_pkt_burst = NULL;
1654 eth_dev->tx_pkt_burst = NULL;
1655 eth_dev->tx_pkt_prepare = NULL;
1656 if (hw->adapter_state < HNS3_NIC_CLOSING)
1657 hns3_dev_close(eth_dev);
1659 hw->adapter_state = HNS3_NIC_REMOVED;
1664 eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1665 struct rte_pci_device *pci_dev)
1667 return rte_eth_dev_pci_generic_probe(pci_dev,
1668 sizeof(struct hns3_adapter),
1673 eth_hns3_pci_remove(struct rte_pci_device *pci_dev)
1675 return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit);
1678 static const struct rte_pci_id pci_id_hns3_map[] = {
1679 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
1680 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
1681 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
1682 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
1683 { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
1684 { .vendor_id = 0, /* sentinel */ },
1687 static struct rte_pci_driver rte_hns3_pmd = {
1688 .id_table = pci_id_hns3_map,
1689 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1690 .probe = eth_hns3_pci_probe,
1691 .remove = eth_hns3_pci_remove,
1694 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
1695 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
1696 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
1698 RTE_INIT(hns3_init_log)
1700 hns3_logtype_init = rte_log_register("pmd.net.hns3.init");
1701 if (hns3_logtype_init >= 0)
1702 rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE);
1703 hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver");
1704 if (hns3_logtype_driver >= 0)
1705 rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE);