/* fm10k defines */
#define RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE 1
+/* hns3 defines */
+#define RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF 256
+
/* i40e defines */
#define RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC 1
#undef RTE_LIBRTE_I40E_16BYTE_RX_DESC
#define HNS3_CMDQ_RX_INVLD_B 0
#define HNS3_CMDQ_RX_OUTVLD_B 1
#define HNS3_CMD_DESC_ALIGNMENT 4096
-#define HNS3_QUEUE_ID_MASK 0x1ff
#define HNS3_CMD_FLAG_NEXT BIT(2)
struct hns3_hw;
uint16_t pf_own_fun_number;
uint16_t tx_buf_size;
uint16_t dv_buf_size;
- uint16_t tqp_num_ext;
+ /* number of queues that exceed 1024 */
+ uint16_t ext_tqp_num;
uint16_t roh_pf_intr_vector_number;
uint32_t rsv[1];
};
uint8_t rsv[22];
};
-#define HNS3_RING_ID_MASK GENMASK(9, 0)
#define HNS3_TQP_ENABLE_B 0
#define HNS3_MAC_CFG_AN_EN_B 0
uint32_t max_tm_rate;
};
-#define HNS3_MAX_TQP_NUM_PER_FUNC 64
+#define HNS3_MAX_TQP_NUM_HIP08_PF 64
#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HNS3_DEFAULT_DV 0xA000 /* 40k byte */
return ret;
}
-void
+static int
hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
{
struct hns3_rss_conf *rss_cfg = &hw->rss_info;
uint16_t rx_qnum_per_tc;
+ uint16_t used_rx_queues;
int i;
rx_qnum_per_tc = nb_rx_q / hw->num_tc;
- rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
- if (hw->alloc_rss_size != rx_qnum_per_tc) {
- hns3_info(hw, "rss size changes from %u to %u",
- hw->alloc_rss_size, rx_qnum_per_tc);
- hw->alloc_rss_size = rx_qnum_per_tc;
+ if (rx_qnum_per_tc > hw->rss_size_max) {
+		hns3_err(hw, "rx queue number per tc (%u) is greater than "
+			 "the maximum (%u) supported by hardware.",
+			 rx_qnum_per_tc, hw->rss_size_max);
+ return -EINVAL;
}
- hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
+
+ used_rx_queues = hw->num_tc * rx_qnum_per_tc;
+ if (used_rx_queues != nb_rx_q) {
+		hns3_err(hw, "configured rx queue number (%u) must be an "
+			 "integral multiple of the valid tc number (%u).",
+			 nb_rx_q, hw->num_tc);
+ return -EINVAL;
+ }
+ hw->alloc_rss_size = rx_qnum_per_tc;
+ hw->used_rx_queues = used_rx_queues;
/*
* When rss size is changed, we need to update rss redirection table
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
}
+
+ return 0;
}
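/*
 * For illustration, assuming hypothetical values num_tc = 4 and
 * rss_size_max = 64: nb_rx_q = 16 gives rx_qnum_per_tc = 4 and
 * used_rx_queues = 16, which is accepted; nb_rx_q = 18 gives
 * rx_qnum_per_tc = 4 but used_rx_queues = 16 != 18, so -EINVAL is returned.
 */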
-void
-hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
+static int
+hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
{
struct hns3_tc_queue_info *tc_queue;
+ uint16_t used_tx_queues;
+ uint16_t tx_qnum_per_tc;
uint8_t i;
- hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
+ tx_qnum_per_tc = nb_tx_q / hw->num_tc;
+ used_tx_queues = hw->num_tc * tx_qnum_per_tc;
+ if (used_tx_queues != nb_tx_q) {
+		hns3_err(hw, "configured tx queue number (%u) must be an "
+			 "integral multiple of the valid tc number (%u).",
+			 nb_tx_q, hw->num_tc);
+ return -EINVAL;
+ }
+
+ hw->used_tx_queues = used_tx_queues;
+ hw->tx_qnum_per_tc = tx_qnum_per_tc;
for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
tc_queue = &hw->tc_queue[i];
if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
tc_queue->tc = 0;
}
}
- hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
+
+ return 0;
}
-static void
+int
+hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
+{
+ int ret;
+
+ ret = hns3_set_rss_size(hw, nb_rx_q);
+ if (ret)
+ return ret;
+
+ return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
+}
+
+static int
hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
uint16_t nb_tx_q)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_pf *pf = &hns->pf;
+ int ret;
hw->num_tc = hw->dcb_info.num_tc;
- hns3_set_rss_size(hw, nb_rx_q);
- hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
+ ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
+ if (ret)
+ return ret;
if (!hns->is_vf)
memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
+
+ return 0;
}
int
{
struct hns3_nq_to_qs_link_cmd *map;
struct hns3_cmd_desc desc;
+ uint16_t tmp_qs_id = 0;
+ uint16_t qs_id_l;
+ uint16_t qs_id_h;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
map->nq_id = rte_cpu_to_le_16(q_id);
- map->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
+
+ /*
+	 * The network engine with revision_id 0x21 uses bits 0~9 of qs_id to
+	 * configure the qset_id. So we need to convert qs_id to the following
+	 * format to support a qset_id of 1024 or larger.
+ * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ * / / \ \
+ * / / \ \
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * | qs_id_h | vld | qs_id_l |
+ */
+ qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
+ HNS3_DCB_QS_ID_L_S);
+ qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
+ HNS3_DCB_QS_ID_H_S);
+ hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
+ qs_id_l);
+ hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
+ HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
+ map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
return hns3_cmd_send(hw, &desc, 1);
}
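/*
 * Worked example with an assumed qs_id = 0x500 (1280):
 *   qs_id_l   = 0x500 & GENMASK(9, 0)           = 0x100
 *   qs_id_h   = (0x500 & GENMASK(14, 10)) >> 10 = 0x1
 *   tmp_qs_id = (0x1 << 11) | 0x100             = 0x900
 * so the value written to map->qset_id is 0x900 | BIT(10) = 0xD00.
 */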
*changed = true;
}
-static void
+static int
hns3_dcb_info_cfg(struct hns3_adapter *hns)
{
struct rte_eth_dcb_rx_conf *dcb_rx_conf;
struct hns3_hw *hw = &hns->hw;
uint8_t tc_bw, bw_rest;
uint8_t i, j;
+ int ret;
dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
- hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
- hw->data->nb_tx_queues);
+ ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
+ hw->data->nb_tx_queues);
+ if (ret)
+ hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
+
+ return ret;
}
static int
hw->dcb_info.num_tc = 1;
}
hw->hw_tc_map = bit_map;
- hns3_dcb_info_cfg(hns);
- return 0;
+ return hns3_dcb_info_cfg(hns);
}
static int
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_pf *pf = &hns->pf;
+ uint16_t default_tqp_num;
int ret;
PMD_INIT_FUNC_TRACE();
ret = hns3_dcb_info_init(hw);
if (ret) {
- hns3_err(hw, "dcb info init failed: %d", ret);
+ hns3_err(hw, "dcb info init failed, ret = %d.", ret);
+ return ret;
+ }
+
+ /*
+ * The number of queues configured by default cannot exceed
+ * the maximum number of queues for a single TC.
+ */
+ default_tqp_num = RTE_MIN(hw->rss_size_max,
+ hw->tqps_num / hw->dcb_info.num_tc);
+ ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
+ default_tqp_num);
+ if (ret) {
+ hns3_err(hw,
+ "update tc queue mapping failed, ret = %d.",
+ ret);
return ret;
}
- hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
- hw->tqps_num);
}
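/*
 * For illustration, assuming hypothetical values rss_size_max = 64,
 * tqps_num = 256 and dcb_info.num_tc = 1: default_tqp_num =
 * RTE_MIN(64, 256 / 1) = 64, so 64 rx/tx queues are mapped by default
 * even though the PF owns 256 tqps.
 */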
/*
*/
ret = hns3_dcb_init_hw(hw);
if (ret) {
- hns3_err(hw, "dcb init hardware failed: %d", ret);
+ hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
return ret;
}
uint16_t nb_tx_q = hw->data->nb_tx_queues;
int ret;
- hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
+ ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
+ ret);
+ return ret;
+ }
ret = hns3_q_to_qs_map(hw);
if (ret)
- hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
+ hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
return ret;
}
uint16_t rsvd;
uint8_t priority;
#define HNS3_DCB_QS_PRI_LINK_VLD_MSK BIT(0)
+#define HNS3_DCB_QS_ID_L_MSK GENMASK(9, 0)
+#define HNS3_DCB_QS_ID_L_S 0
+#define HNS3_DCB_QS_ID_H_MSK GENMASK(14, 10)
+#define HNS3_DCB_QS_ID_H_S 10
+#define HNS3_DCB_QS_ID_H_EXT_S 11
+#define HNS3_DCB_QS_ID_H_EXT_MSK GENMASK(15, 11)
uint8_t link_vld;
uint8_t rsvd1[18];
};
uint32_t rsvd1[4];
};
-#define HNS3_BP_GRP_NUM 32
+#define HNS3_BP_GRP_NUM 32
#define HNS3_BP_SUB_GRP_ID_S 0
#define HNS3_BP_SUB_GRP_ID_M GENMASK(4, 0)
#define HNS3_BP_GRP_ID_S 5
#define HNS3_BP_GRP_ID_M GENMASK(9, 5)
+
struct hns3_bp_to_qs_map_cmd {
uint8_t tc_id;
uint8_t rsvd[2];
int hns3_dcb_info_init(struct hns3_hw *hw);
-int
-hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
-
-int
-hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf);
-void hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q);
-void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue);
+int hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
+int hns3_dcb_pfc_enable(struct rte_eth_dev *dev,
+			struct rte_eth_pfc_conf *pfc_conf);
+int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
+			     uint16_t nb_tx_q);
int hns3_dcb_cfg_update(struct hns3_adapter *hns);
return hns3_parse_func_status(hw, req);
}
+static int
+hns3_get_pf_max_tqp_num(struct hns3_hw *hw)
+{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+
+ if (pf->tqp_config_mode == HNS3_FLEX_MAX_TQP_NUM_MODE) {
+ /*
+		 * The total_tqps_num obtained from firmware is the maximum
+		 * tqp number of this port, to be shared by the PF and its
+		 * VFs. In most cases the PF does not need that many tqps.
+		 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF, coming from the config
+		 * file, is the maximum queue number assigned to the PF of
+		 * this port by the user. So users can modify the maximum
+		 * queue number of the PF according to their own application
+		 * scenarios, which is more flexible to use. In addition,
+		 * memory can be saved because the queue statistics room is
+		 * allocated according to the actual number of queues
+		 * required. The maximum queue number of the PF for network
+		 * engines with revision_id greater than 0x30 is assigned by
+		 * the config file.
+ */
+ if (RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF <= 0) {
+ hns3_err(hw, "RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF(%d) "
+ "must be greater than 0.",
+ RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF);
+ return -EINVAL;
+ }
+
+ hw->tqps_num = RTE_MIN(RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF,
+ hw->total_tqps_num);
+ } else {
+ /*
+ * Due to the limitation on the number of PF interrupts
+ * available, the maximum queue number assigned to PF on
+ * the network engine with revision_id 0x21 is 64.
+ */
+ hw->tqps_num = RTE_MIN(hw->total_tqps_num,
+ HNS3_MAX_TQP_NUM_HIP08_PF);
+ }
+
+ return 0;
+}
+
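/*
 * For illustration, with hypothetical firmware values: in
 * HNS3_FLEX_MAX_TQP_NUM_MODE, total_tqps_num = 1280 and
 * RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF = 256 give the PF
 * tqps_num = RTE_MIN(256, 1280) = 256; in HNS3_FIXED_MAX_TQP_NUM_MODE,
 * total_tqps_num = 128 gives RTE_MIN(128, 64) = 64.
 */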
static int
hns3_query_pf_resource(struct hns3_hw *hw)
{
}
req = (struct hns3_pf_res_cmd *)desc.data;
- hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);
+ hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num) +
+ rte_le_to_cpu_16(req->ext_tqp_num);
+ ret = hns3_get_pf_max_tqp_num(hw);
+ if (ret)
+ return ret;
+
pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;
- hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number);
if (req->tx_buf_size)
static int
hns3_get_capability(struct hns3_hw *hw)
{
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct rte_pci_device *pci_dev;
+ struct hns3_pf *pf = &hns->pf;
struct rte_eth_dev *eth_dev;
uint16_t device_id;
uint8_t revision;
hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_SW_SHIFT_AND_DISCARD_MODE;
hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
+ pf->tqp_config_mode = HNS3_FIXED_MAX_TQP_NUM_MODE;
return 0;
}
hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE;
hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
+ pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE;
return 0;
}
ret = hns3_get_board_configuration(hw);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret);
+ PMD_INIT_LOG(ERR, "failed to get board configuration: %d", ret);
return ret;
}
static int
hns3_map_tqp(struct hns3_hw *hw)
{
- uint16_t tqps_num = hw->total_tqps_num;
- uint16_t func_id;
- uint16_t tqp_id;
- bool is_pf;
- int num;
int ret;
int i;
/*
- * In current version VF is not supported when PF is driven by DPDK
- * driver, so we allocate tqps to PF as much as possible.
+	 * In the current version, VF is not supported when the PF is driven
+	 * by the DPDK driver, so we assign all the tqps allocated to this
+	 * port to the PF.
*/
- tqp_id = 0;
- num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
- for (func_id = HNS3_PF_FUNC_ID; func_id < num; func_id++) {
- is_pf = func_id == HNS3_PF_FUNC_ID ? true : false;
- for (i = 0;
- i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {
- ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,
- is_pf);
- if (ret)
- return ret;
- }
+ for (i = 0; i < hw->total_tqps_num; i++) {
+ ret = hns3_map_tqps_to_func(hw, HNS3_PF_FUNC_ID, i, i, true);
+ if (ret)
+ return ret;
}
return 0;
goto err_get_config;
}
+ ret = hns3_tqp_stats_init(hw);
+ if (ret)
+ goto err_get_config;
+
ret = hns3_init_hardware(hns);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret);
- goto err_get_config;
+ goto err_init_hw;
}
/* Initialize flow director filter list & hash */
ret = hns3_fdir_filter_init(hns);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
- goto err_hw_init;
+ goto err_fdir;
}
hns3_set_default_rss_args(hw);
if (ret) {
PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d",
ret);
- goto err_fdir;
+ goto err_enable_intr;
}
return 0;
-err_fdir:
+err_enable_intr:
hns3_fdir_filter_uninit(hns);
-err_hw_init:
+err_fdir:
hns3_uninit_umv_space(hw);
-
+err_init_hw:
+ hns3_tqp_stats_uninit(hw);
err_get_config:
hns3_pf_disable_irq0(hw);
rte_intr_disable(&pci_dev->intr_handle);
hns3_promisc_uninit(hw);
hns3_fdir_filter_uninit(hns);
hns3_uninit_umv_space(hw);
+ hns3_tqp_stats_uninit(hw);
hns3_pf_disable_irq0(hw);
rte_intr_disable(&pci_dev->intr_handle);
hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
};
struct hns3_tc_queue_info {
- uint8_t tqp_offset; /* TQP offset from base TQP */
- uint8_t tqp_count; /* Total TQPs */
- uint8_t tc; /* TC index */
+ uint16_t tqp_offset; /* TQP offset from base TQP */
+ uint16_t tqp_count; /* Total TQPs */
+ uint8_t tc; /* TC index */
bool enable; /* If this TC is enable or not */
};
uint32_t ol4table[HNS3_OL4TBL_NUM];
};
+#define HNS3_FIXED_MAX_TQP_NUM_MODE 0
+#define HNS3_FLEX_MAX_TQP_NUM_MODE 1
+
struct hns3_pf {
struct hns3_adapter *adapter;
bool is_main_pf;
uint16_t func_num; /* num functions of this pf, include pf and vfs */
+ /*
+ * tqp_config mode
+ * tqp_config_mode value range:
+ * HNS3_FIXED_MAX_TQP_NUM_MODE,
+ * HNS3_FLEX_MAX_TQP_NUM_MODE
+ *
+ * - HNS3_FIXED_MAX_TQP_NUM_MODE
+	 *   There is a limitation on the number of PF interrupts available
+	 *   on some versions of the network engines. In this case, the
+	 *   maximum queue number of the PF cannot be greater than the
+	 *   interrupt number, e.g. for the PF of network engines with
+	 *   revision_id 0x21. So the maximum number of queues must be fixed.
+ *
+ * - HNS3_FLEX_MAX_TQP_NUM_MODE
+	 *   In this mode, the maximum queue number of the PF has no such
+	 *   constraint and comes from the macro
+	 *   RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF in the config file. Users can
+	 *   modify the macro according to their own application scenarios,
+	 *   which is more flexible to use.
+ */
+ uint8_t tqp_config_mode;
+
uint32_t pkt_buf_size; /* Total pf buf size for tx/rx */
uint32_t tx_buf_size; /* Tx buffer size for each TC */
uint32_t dv_buf_size; /* Dv buffer size for each TC */
static int
hns3vf_check_tqp_info(struct hns3_hw *hw)
{
- uint16_t tqps_num;
+ if (hw->tqps_num == 0) {
+ PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
+ return -EINVAL;
+ }
- tqps_num = hw->tqps_num;
- if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) {
- PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid "
- "range: 1~%d",
- tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);
+ if (hw->rss_size_max == 0) {
+ PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
return -EINVAL;
}
- hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num);
+ hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
return 0;
}
+
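/*
 * For illustration, assuming the PF reports the hypothetical values
 * tqps_num = 64 and rss_size_max = 16, the VF limits itself to
 * RTE_MIN(16, 64) = 16 queues.
 */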
static int
hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
{
{
uint8_t resp_msg;
int ret;
+ int i;
ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
true, &resp_msg, sizeof(resp_msg));
hw->hw_tc_map = resp_msg;
+ for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+ if (hw->hw_tc_map & BIT(i))
+ hw->num_tc++;
+ }
+
return 0;
}
}
static int
-hns3vf_set_tc_info(struct hns3_adapter *hns)
+hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
+ uint16_t nb_tx_q)
{
struct hns3_hw *hw = &hns->hw;
- uint16_t nb_rx_q = hw->data->nb_rx_queues;
- uint16_t nb_tx_q = hw->data->nb_tx_queues;
- uint8_t i;
-
- hw->num_tc = 0;
- for (i = 0; i < HNS3_MAX_TC_NUM; i++)
- if (hw->hw_tc_map & BIT(i))
- hw->num_tc++;
if (nb_rx_q < hw->num_tc) {
hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
return -EINVAL;
}
- hns3_set_rss_size(hw, nb_rx_q);
- hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
-
- return 0;
+ return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
}
static void
goto err_get_config;
}
+ ret = hns3_tqp_stats_init(hw);
+ if (ret)
+ goto err_get_config;
+
+ ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
+ goto err_set_tc_queue;
+ }
+
ret = hns3vf_clear_vport_list(hw);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
- goto err_get_config;
+ goto err_set_tc_queue;
}
ret = hns3vf_init_hardware(hns);
if (ret)
- goto err_get_config;
+ goto err_set_tc_queue;
hns3_set_default_rss_args(hw);
return 0;
+err_set_tc_queue:
+ hns3_tqp_stats_uninit(hw);
+
err_get_config:
hns3vf_disable_irq0(hw);
rte_intr_disable(&pci_dev->intr_handle);
(void)hns3_config_gro(hw, false);
(void)hns3vf_set_alive(hw, false);
(void)hns3vf_set_promisc_mode(hw, false, false, false);
+ hns3_tqp_stats_uninit(hw);
hns3vf_disable_irq0(hw);
rte_intr_disable(&pci_dev->intr_handle);
hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
{
struct hns3_hw *hw = &hns->hw;
+ uint16_t nb_rx_q = hw->data->nb_rx_queues;
+ uint16_t nb_tx_q = hw->data->nb_tx_queues;
int ret;
- ret = hns3vf_set_tc_info(hns);
+ ret = hns3vf_set_tc_queue_mapping(hns, nb_rx_q, nb_tx_q);
if (ret)
return ret;
#define HNS3_FD_AD_QUEUE_REGION_SIZE_M GENMASK(20, 17)
#define HNS3_FD_AD_COUNTER_HIGH_BIT 7
#define HNS3_FD_AD_COUNTER_HIGH_BIT_B 26
+#define HNS3_FD_AD_QUEUE_ID_HIGH_BIT 10
+#define HNS3_FD_AD_QUEUE_ID_HIGH_BIT_B 21
enum HNS3_PORT_TYPE {
HOST_PORT,
/* set extend bit if counter_id is in [128 ~ 255] */
if (action->counter_id & BIT(HNS3_FD_AD_COUNTER_HIGH_BIT))
hns3_set_bit(ad_data, HNS3_FD_AD_COUNTER_HIGH_BIT_B, 1);
+	/* set extend bit if queue id is 1024 or larger */
+ if (action->queue_id & BIT(HNS3_FD_AD_QUEUE_ID_HIGH_BIT))
+ hns3_set_bit(ad_data, HNS3_FD_AD_QUEUE_ID_HIGH_BIT_B, 1);
ad_data <<= HNS3_FD_AD_DATA_S;
hns3_set_bit(ad_data, HNS3_FD_AD_DROP_B, action->drop_packet);
if (action->nb_queues == 1)
reg_um = sizeof(ring_reg_addrs) / sizeof(uint32_t);
separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
for (j = 0; j < hw->tqps_num; j++) {
- reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_REG_SIZE * j;
+ reg_offset = hns3_get_tqp_reg_offset(j);
for (i = 0; i < reg_um; i++)
*data++ = hns3_read_dev(hw,
ring_reg_addrs[i] + reg_offset);
#define HNS3_TQP_REG_OFFSET 0x80000
#define HNS3_TQP_REG_SIZE 0x200
+#define HNS3_TQP_EXT_REG_OFFSET 0x100
+#define HNS3_MIN_EXTEND_QUEUE_ID 1024
+
/* bar registers for tqp interrupt */
#define HNS3_TQP_INTR_CTRL_REG 0x20000
#define HNS3_TQP_INTR_GL0_REG 0x20100
req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false);
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
req->stream_id = 0;
hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0);
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false);
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
ret = hns3_cmd_send(hw, &desc, 1);
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true);
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK);
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
return 0;
}
+uint32_t
+hns3_get_tqp_reg_offset(uint16_t queue_id)
+{
+ uint32_t reg_offset;
+
+	/* An extended offset is needed to address queue ids of 1024 and above */
+ if (queue_id < HNS3_MIN_EXTEND_QUEUE_ID)
+ reg_offset = HNS3_TQP_REG_OFFSET + queue_id * HNS3_TQP_REG_SIZE;
+ else
+ reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_EXT_REG_OFFSET +
+ (queue_id - HNS3_MIN_EXTEND_QUEUE_ID) *
+ HNS3_TQP_REG_SIZE;
+
+ return reg_offset;
+}
+
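/*
 * Worked examples for the offsets computed above:
 *   queue 3    -> 0x80000 + 3 * 0x200           = 0x80600
 *   queue 1024 -> 0x80000 + 0x100 + 0 * 0x200   = 0x80100
 *   queue 1200 -> 0x80000 + 0x100 + 176 * 0x200 = 0x96100
 */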
int
hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
unsigned int socket_id, const struct rte_eth_rxconf *conf,
rxq->configured = true;
-	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
-				idx * HNS3_TQP_REG_SIZE);
+ rxq->io_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(idx));
rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
HNS3_RING_RX_HEAD_REG);
rxq->rx_buf_len = rx_buf_size;
txq->pvid_sw_shift_en = false;
txq->max_non_tso_bd_num = hw->max_non_tso_bd_num;
txq->configured = true;
- txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
- idx * HNS3_TQP_REG_SIZE);
+ txq->io_base = (void *)((char *)hw->io_base +
+ hns3_get_tqp_reg_offset(idx));
txq->io_tail_reg = (volatile void *)((char *)txq->io_base +
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
+
#endif /* _HNS3_RXTX_H_ */
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
HNS3_NUM_RESET_XSTATS)
+static void hns3_tqp_stats_clear(struct hns3_hw *hw);
+
/*
* Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034.
* This command is used before send 'query_mac_stat command', the descriptor
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS,
true);
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "Failed to query RX No.%d queue stat: %d",
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS,
true);
- desc.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
hns3_err(hw, "Failed to query TX No.%d queue stat: %d",
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
int ret;
/*
- * If this is a reset xstats is NULL, and we have cleared the
- * registers by reading them.
+	 * Note: reading the rx/tx queue packet-number statistics from
+	 * hardware clears them.
*/
for (i = 0; i < hw->tqps_num; i++) {
hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS,
true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
hns3_err(hw, "Failed to reset RX No.%d queue stat: %d",
hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS,
true);
- desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i &
- HNS3_QUEUE_ID_MASK);
+ desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
hns3_err(hw, "Failed to reset TX No.%d queue stat: %d",
}
}
- memset(stats, 0, sizeof(struct hns3_tqp_stats));
+ hns3_tqp_stats_clear(hw);
return 0;
}
/* Get rx queue stats */
for (j = 0; j < dev->data->nb_rx_queues; j++) {
for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
- reg_offset = HNS3_TQP_REG_OFFSET +
- HNS3_TQP_REG_SIZE * j;
+ reg_offset = hns3_get_tqp_reg_offset(j);
xstats[*count].value = hns3_read_dev(hw,
reg_offset + hns3_rx_queue_strings[i].offset);
xstats[*count].id = *count;
/* Get tx queue stats */
for (j = 0; j < dev->data->nb_tx_queues; j++) {
for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
- reg_offset = HNS3_TQP_REG_OFFSET +
- HNS3_TQP_REG_SIZE * j;
+ reg_offset = hns3_get_tqp_reg_offset(j);
xstats[*count].value = hns3_read_dev(hw,
reg_offset + hns3_tx_queue_strings[i].offset);
xstats[*count].id = *count;
return 0;
}
+
+int
+hns3_tqp_stats_init(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+
+ tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
+ sizeof(uint64_t) * hw->tqps_num, 0);
+ if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
+ hns3_err(hw, "failed to allocate rx_ring pkt_num.");
+ return -ENOMEM;
+ }
+
+ tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
+ sizeof(uint64_t) * hw->tqps_num, 0);
+ if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
+ hns3_err(hw, "failed to allocate tx_ring pkt_num.");
+ rte_free(tqp_stats->rcb_rx_ring_pktnum);
+ tqp_stats->rcb_rx_ring_pktnum = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+hns3_tqp_stats_uninit(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
+
+ rte_free(tqp_stats->rcb_rx_ring_pktnum);
+ tqp_stats->rcb_rx_ring_pktnum = NULL;
+ rte_free(tqp_stats->rcb_tx_ring_pktnum);
+ tqp_stats->rcb_tx_ring_pktnum = NULL;
+}
+
+static void
+hns3_tqp_stats_clear(struct hns3_hw *hw)
+{
+ struct hns3_tqp_stats *stats = &hw->tqp_stats;
+
+ stats->rcb_rx_ring_pktnum_rcd = 0;
+ stats->rcb_tx_ring_pktnum_rcd = 0;
+ memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+ memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
+}
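/*
 * Sizing note: the per-queue counters are now allocated at runtime, e.g.
 * for a hypothetical tqps_num = 256 each array takes
 * 256 * sizeof(uint64_t) = 2048 bytes, instead of the previous fixed
 * HNS3_MAX_TQP_NUM_PER_FUNC (64) entries per array.
 */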
struct hns3_tqp_stats {
uint64_t rcb_tx_ring_pktnum_rcd; /* Total num of transmitted packets */
uint64_t rcb_rx_ring_pktnum_rcd; /* Total num of received packets */
- uint64_t rcb_tx_ring_pktnum[HNS3_MAX_TQP_NUM_PER_FUNC];
- uint64_t rcb_rx_ring_pktnum[HNS3_MAX_TQP_NUM_PER_FUNC];
+ uint64_t *rcb_rx_ring_pktnum;
+ uint64_t *rcb_tx_ring_pktnum;
};
/* mac stats, Statistics counters collected by the MAC, opcode id: 0x0032 */
uint32_t size);
int hns3_stats_reset(struct rte_eth_dev *dev);
void hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err);
+int hns3_tqp_stats_init(struct hns3_hw *hw);
+void hns3_tqp_stats_uninit(struct hns3_hw *hw);
#endif /* _HNS3_STATS_H_ */