HNS3_OPC_GBL_RST_STATUS = 0x0021,
HNS3_OPC_QUERY_FUNC_STATUS = 0x0022,
HNS3_OPC_QUERY_PF_RSRC = 0x0023,
+ HNS3_OPC_QUERY_VF_RSRC = 0x0024,
HNS3_OPC_GET_CFG_PARAM = 0x0025,
HNS3_OPC_PF_RST_DONE = 0x0026,
uint8_t rsv[2];
};
-#define HNS3_PF_VEC_NUM_S 0
-#define HNS3_PF_VEC_NUM_M GENMASK(7, 0)
+#define HNS3_VEC_NUM_S 0
+#define HNS3_VEC_NUM_M GENMASK(7, 0)
+#define HNS3_MIN_VECTOR_NUM 2 /* one for misc interrupt, another for IO */
struct hns3_pf_res_cmd {
uint16_t tqp_num;
uint16_t buf_size;
uint32_t rsv[2];
};
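+/*
+ * VF resource query response: bits 0~7 of 'vf_intr_vector_number' carry the
+ * number of MSI-X vectors allocated to this VF (extracted with
+ * HNS3_VEC_NUM_M/S).
+ */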
+struct hns3_vf_res_cmd {
+ uint16_t tqp_num;
+ uint16_t reserved;
+ uint16_t msixcap_localid_ba_nic;
+ uint16_t msixcap_localid_ba_rocee;
+ uint16_t vf_intr_vector_number;
+ uint16_t rsv[7];
+};
+
#define HNS3_UMV_SPC_ALC_B 0
struct hns3_umv_spc_alc_cmd {
uint8_t allocate;
uint8_t rsv[18];
};
-#define HNS3_RING_TYPE_B 0
-#define HNS3_RING_TYPE_TX 0
-#define HNS3_RING_TYPE_RX 1
+enum hns3_ring_type {
+ HNS3_RING_TYPE_TX,
+ HNS3_RING_TYPE_RX
+};
+
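+/*
+ * GL (gap limiter) index of a queue interrupt: HNS3_RING_GL_RX/TX select the
+ * Rx/Tx gap limiter; the value is carried in the ring-to-vector bind command.
+ */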
+enum hns3_int_gl_idx {
+ HNS3_RING_GL_RX,
+ HNS3_RING_GL_TX,
+ HNS3_RING_GL_IMMEDIATE = 3
+};
+
#define HNS3_RING_GL_IDX_S 0
#define HNS3_RING_GL_IDX_M GENMASK(1, 0)
-#define HNS3_RING_GL_RX 0
-#define HNS3_RING_GL_TX 1
#define HNS3_VECTOR_ELEMENTS_PER_CMD 10
}
static int
-hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
- bool mmap, uint16_t queue_id)
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+ enum hns3_ring_type queue_type, uint16_t queue_id)
{
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_cmd_desc desc;
struct hns3_ctrl_vector_chain_cmd *req =
(struct hns3_ctrl_vector_chain_cmd *)desc.data;
enum hns3_cmd_status status;
enum hns3_opcode_type op;
uint16_t tqp_type_and_id = 0;
+ const char *op_str;
+ uint16_t type;
+ uint16_t gl;
op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
hns3_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
+ if (queue_type == HNS3_RING_TYPE_RX)
+ gl = HNS3_RING_GL_RX;
+ else
+ gl = HNS3_RING_GL_TX;
+
+ type = queue_type;
+
hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
- HNS3_RING_TYPE_RX);
+ type);
hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
- HNS3_RING_GL_RX);
+ gl);
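+ /* only one ring is bound per command here, so chain entry 0 is used */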
req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
-
req->int_cause_num = 1;
+ op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
- hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
- queue_id, vector_id, status);
- return -EIO;
+ hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+ op_str, queue_id, req->int_vector_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+ uint8_t vec;
+ int ret;
+ int i;
+
+ /*
+ * In the hns3 network engine, vector 0 is always the misc interrupt of
+ * this function, and vector 1~N can be used for the queues of the
+ * function. Tx and Rx queues with the same number share the interrupt
+ * vector. At initialization, all hardware mappings between queues and
+ * interrupt vectors need to be cleared, so that errors caused by
+ * residual configurations, such as an unexpected Tx interrupt, can be
+ * avoided. Because of constraints in the hns3 hardware engine, the
+ * mappings are cleared by binding all queues to the last interrupt
+ * vector and reserving that vector. This method reduces the maximum
+ * number of queues when upper applications call the
+ * rte_eth_dev_configure API function to enable Rx interrupt.
+ */
+ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+ hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+ for (i = 0; i < hw->intr_tqps_num; i++) {
+ /*
+ * Set gap limiter and rate limiter configuration of queue's
+ * interrupt.
+ */
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+ ret = hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_TX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+
+ ret = hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
}
return 0;
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ uint16_t queue_num = hw->tqps_num;
- info->max_rx_queues = hw->tqps_num;
+ /*
+ * In interrupt mode, 'max_rx_queues' is set based on the number of
+ * MSI-X interrupt resources of the hardware.
+ */
+ if (hw->data->dev_conf.intr_conf.rxq == 1)
+ queue_num = hw->intr_tqps_num;
+
+ info->max_rx_queues = queue_num;
info->max_tx_queues = hw->tqps_num;
info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
info->min_rx_bufsize = hw->rx_buf_len;
struct hns3_pf *pf = &hns->pf;
struct hns3_pf_res_cmd *req;
struct hns3_cmd_desc desc;
+ uint16_t num_msi;
int ret;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
- hw->num_msi =
- hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
- HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+ num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+ HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
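+ /* at most tqps_num + 1 vectors are used: one for the misc interrupt plus one per queue pair */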
+ hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
return 0;
}
goto err_fdir;
}
+ /*
+ * At initialization, all hardware mappings between queues and interrupt
+ * vectors need to be cleared, so that errors caused by residual
+ * configurations, such as an unexpected interrupt, can be avoided.
+ */
+ ret = hns3_init_ring_with_vector(hw);
+ if (ret)
+ goto err_fdir;
+
return 0;
err_fdir:
}
if (rte_intr_dp_is_en(intr_handle)) {
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+ ret = hns3_bind_ring_with_vector(hw, vec, true,
+ HNS3_RING_TYPE_RX,
+ q_id);
if (ret)
goto bind_vector_error;
intr_handle->intr_vec[q_id] = vec;
}
if (rte_intr_dp_is_en(intr_handle)) {
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- (void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+ (void)hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX,
+ q_id);
if (vec < base + intr_handle->nb_efd - 1)
vec++;
}
uint16_t num_msi;
uint16_t total_tqps_num; /* total task queue pairs of this PF */
uint16_t tqps_num; /* num task queue pairs of this function */
+ uint16_t intr_tqps_num; /* num queue pairs mapping interrupt */
uint16_t rss_size_max; /* HW defined max RSS task queue */
uint16_t rx_buf_len;
uint16_t num_tx_desc; /* desc num of per tx queue */
return ret;
}
+static int
+hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
+ bool mmap, enum hns3_ring_type queue_type,
+ uint16_t queue_id)
+{
+ struct hns3_vf_bind_vector_msg bind_msg;
+ const char *op_str;
+ uint16_t code;
+ int ret;
+
+ memset(&bind_msg, 0, sizeof(bind_msg));
+ code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+ HNS3_MBX_UNMAP_RING_TO_VECTOR;
+ bind_msg.vector_id = vector_id;
+
+ if (queue_type == HNS3_RING_TYPE_RX)
+ bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+ else
+ bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX;
+
+ bind_msg.param[0].ring_type = queue_type;
+ bind_msg.ring_num = 1;
+ bind_msg.param[0].tqp_index = queue_id;
+ op_str = mmap ? "Map" : "Unmap";
+ ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+ sizeof(bind_msg), false, NULL, 0);
+ if (ret) {
+ hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+ op_str, queue_id, bind_msg.vector_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hns3vf_init_ring_with_vector(struct hns3_hw *hw)
+{
+ uint8_t vec;
+ int ret;
+ int i;
+
+ /*
+ * In the hns3 network engine, vector 0 is always the misc interrupt of
+ * this function, and vector 1~N can be used for the queues of the
+ * function. Tx and Rx queues with the same number share the interrupt
+ * vector. At initialization, all hardware mappings between queues and
+ * interrupt vectors need to be cleared, so that errors caused by
+ * residual configurations, such as an unexpected Tx interrupt, can be
+ * avoided. Because of constraints in the hns3 hardware engine, the
+ * mappings are cleared by binding all queues to the last interrupt
+ * vector and reserving that vector. This method reduces the maximum
+ * number of queues when upper applications call the
+ * rte_eth_dev_configure API function to enable Rx interrupt.
+ */
+ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+ hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+ for (i = 0; i < hw->intr_tqps_num; i++) {
+ /*
+ * Set gap limiter and rate limiter configuration of queue's
+ * interrupt.
+ */
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+ ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_TX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+
+ ret = hns3vf_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int
hns3vf_dev_configure(struct rte_eth_dev *dev)
{
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ uint16_t q_num = hw->tqps_num;
+
+ /*
+ * In interrupt mode, 'max_rx_queues' is set based on the number of
+ * MSI-X interrupt resources of the hardware.
+ */
+ if (hw->data->dev_conf.intr_conf.rxq == 1)
+ q_num = hw->intr_tqps_num;
- info->max_rx_queues = hw->tqps_num;
+ info->max_rx_queues = q_num;
info->max_tx_queues = hw->tqps_num;
info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
info->min_rx_bufsize = hw->rx_buf_len;
eth_dev);
}
+static int
+hns3_query_vf_resource(struct hns3_hw *hw)
+{
+ struct hns3_vf_res_cmd *req;
+ struct hns3_cmd_desc desc;
+ uint16_t num_msi;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "query vf resource failed, ret = %d", ret);
+ return ret;
+ }
+
+ req = (struct hns3_vf_res_cmd *)desc.data;
+ num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
+ HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+ if (num_msi < HNS3_MIN_VECTOR_NUM) {
+ hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
+ num_msi, HNS3_MIN_VECTOR_NUM);
+ return -EINVAL;
+ }
+
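+ /* at most tqps_num + 1 vectors are used: one for the misc interrupt plus one per queue pair */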
+ hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
+
+ return 0;
+}
+
static int
hns3vf_init_hardware(struct hns3_adapter *hns)
{
goto err_cmd_init;
}
+ /* Get VF resource */
+ ret = hns3_query_vf_resource(hw);
+ if (ret)
+ goto err_cmd_init;
+
rte_spinlock_init(&hw->mbx_resp.lock);
hns3vf_clear_event_cause(hw, 0);
hns3_set_default_rss_args(hw);
+ /*
+ * At initialization, all hardware mappings between queues and interrupt
+ * vectors need to be cleared, so that errors caused by residual
+ * configurations, such as an unexpected interrupt, can be avoided.
+ */
+ ret = hns3vf_init_ring_with_vector(hw);
+ if (ret)
+ goto err_get_config;
+
return 0;
err_get_config:
hw->io_base = NULL;
}
-static int
-hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
- bool mmap, uint16_t queue_id)
-
-{
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct hns3_vf_bind_vector_msg bind_msg;
- uint16_t code;
- int ret;
-
- memset(&bind_msg, 0, sizeof(bind_msg));
- code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
- HNS3_MBX_UNMAP_RING_TO_VECTOR;
- bind_msg.vector_id = vector_id;
- bind_msg.ring_num = 1;
- bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
- bind_msg.param[0].tqp_index = queue_id;
- bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
-
- ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
- sizeof(bind_msg), false, NULL, 0);
- if (ret) {
- hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
- queue_id, vector_id, ret);
- return ret;
- }
-
- return 0;
-}
-
static int
hns3vf_do_stop(struct hns3_adapter *hns)
{
}
if (rte_intr_dp_is_en(intr_handle)) {
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- (void)hns3vf_bind_ring_with_vector(dev, vec, false,
+ (void)hns3vf_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX,
q_id);
if (vec < base + intr_handle->nb_efd - 1)
vec++;
}
if (rte_intr_dp_is_en(intr_handle)) {
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+ ret = hns3vf_bind_ring_with_vector(hw, vec, true,
+ HNS3_RING_TYPE_RX,
q_id);
if (ret)
goto vf_bind_vector_error;
#define HNS3_RING_EN_B 0
-#define HNS3_VECTOR_REG_OFFSET 0x4
-#define HNS3_VECTOR_VF_OFFSET 0x100000
-
#define HNS3_TQP_REG_OFFSET 0x80000
#define HNS3_TQP_REG_SIZE 0x200
#define HNS3_TQP_INTR_RL_REG 0x20900
#define HNS3_TQP_INTR_REG_SIZE 4
+#define HNS3_TQP_INTR_GL_MAX 0x1FE0
+#define HNS3_TQP_INTR_GL_DEFAULT 20
+#define HNS3_TQP_INTR_RL_MAX 0xEC
+#define HNS3_TQP_INTR_RL_ENABLE_MASK 0x40
+#define HNS3_TQP_INTR_RL_DEFAULT 0
+
+/* gl_usec is converted to a hardware count; each unit written represents 2us */
+#define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1)
+/* rl_usec is converted to a hardware count; each unit written represents 4us */
+#define HNS3_RL_USEC_TO_REG(rl_usec) ((rl_usec) >> 2)
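+/* e.g. the default GL of 20us is written as register value 10; the default RL of 0 leaves rate limiting disabled */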
int hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs);
#endif /* _HNS3_REGS_H_ */
}
void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t gl_idx, uint16_t gl_value)
{
+ uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+ HNS3_TQP_INTR_GL1_REG,
+ HNS3_TQP_INTR_GL2_REG};
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
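+ /* silently ignore an invalid GL index or a GL value above the supported maximum */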
+ if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+ return;
+
+ addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ value = HNS3_GL_USEC_TO_REG(gl_value);
+
+ hns3_write_dev(hw, addr, value);
+}
+
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+ uint32_t addr, value;
+
+ if (rl_value > HNS3_TQP_INTR_RL_MAX)
+ return;
+
+ addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ value = HNS3_RL_USEC_TO_REG(rl_value);
+ if (value > 0)
+ value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+ hns3_write_dev(hw, addr, value);
+}
+
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
+{
+ uint32_t addr, value;
+
+ addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
if (dev->data->dev_conf.intr_conf.rxq == 0)
return -ENOTSUP;
- /* enable the vectors */
- hns3_tqp_intr_enable(hw, queue_id, true);
+ hns3_queue_intr_enable(hw, queue_id, true);
return rte_intr_ack(intr_handle);
}
if (dev->data->dev_conf.intr_conf.rxq == 0)
return -ENOTSUP;
- /* disable the vectors */
- hns3_tqp_intr_enable(hw, queue_id, false);
+ hns3_queue_intr_enable(hw, queue_id, false);
return 0;
}
if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
/* first time configuration */
-
uint32_t size;
size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
}
} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
/* re-configure */
-
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
hns3_dev_rx_queue_release(rxq[i]);
if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
/* first time configuration */
-
uint32_t size;
size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
}
} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
/* re-configure */
-
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
hns3_dev_tx_queue_release(txq[i]);
uint16_t nb_pkts);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
-void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
+void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t gl_idx, uint16_t gl_value);
+void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
+ uint16_t rl_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
uint16_t nb_tx_q);