uint8_t rsv[2];
};
-#define HNS3_VEC_NUM_S 0
-#define HNS3_VEC_NUM_M GENMASK(7, 0)
+#define HNS3_PF_VEC_NUM_S 0
+#define HNS3_PF_VEC_NUM_M GENMASK(15, 0)
#define HNS3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */
struct hns3_pf_res_cmd {
uint16_t tqp_num;
uint16_t buf_size;
uint16_t msixcap_localid_ba_nic;
- uint16_t msixcap_localid_ba_rocee;
- uint16_t pf_intr_vector_number;
+ uint16_t nic_pf_intr_vector_number;
+ uint16_t roce_pf_intr_vector_number;
uint16_t pf_own_fun_number;
uint16_t tx_buf_size;
uint16_t dv_buf_size;
- uint32_t rsv[2];
+ uint16_t tqp_num_ext;
+ uint16_t roh_pf_intr_vector_number;
+ uint32_t rsv[1];
};
+#define HNS3_VF_VEC_NUM_S 0
+#define HNS3_VF_VEC_NUM_M GENMASK(7, 0)
struct hns3_vf_res_cmd {
uint16_t tqp_num;
uint16_t reserved;
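The PF mask is widened so vector counts that no longer fit in GENMASK(7, 0)
can be parsed. A minimal sketch of the field-extraction pattern used with
these masks; the stand-in GENMASK() and hns3_get_field() below only mirror
the driver's real helpers for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		((~0U << (l)) & (~0U >> (31 - (h))))
#define HNS3_PF_VEC_NUM_S	0
#define HNS3_PF_VEC_NUM_M	GENMASK(15, 0)

/* Extract a bit field: the mask selects the bits, the shift right-aligns them. */
static inline uint32_t
hns3_get_field(uint32_t origin, uint32_t mask, uint32_t shift)
{
	return (origin & mask) >> shift;
}

int
main(void)
{
	uint16_t fw_word = 0x0123;	/* hypothetical value from firmware */

	/* The old GENMASK(7, 0) mask would truncate this to 0x23; the
	 * 16-bit mask preserves the full vector count. */
	printf("num_msi = %u\n",
	       hns3_get_field(fw_word, HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S));
	return 0;
}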
static int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
- uint8_t vec;
+ uint16_t vec;
int ret;
int i;
* vector. In the initialization, clearing all the hardware mapping
* relationship configurations between queues and interrupt vectors is
* needed, so some errors caused by the residual configurations, such as
- * the unexpected Tx interrupt, can be avoid. Because of the hardware
- * constraints in hns3 hardware engine, we have to implement clearing
- * the mapping relationship configurations by binding all queues to the
- * last interrupt vector and reserving the last interrupt vector. This
- * method results in a decrease of the maximum queues when upper
- * applications call the rte_eth_dev_configure API function to enable
- * Rx interrupt.
+ * the unexpected Tx interrupt, can be avoided.
*/
vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
- /* vec - 1: the last interrupt is reserved */
- hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
+ if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
+ vec = vec - 1; /* the last interrupt is reserved */
+ hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
for (i = 0; i < hw->intr_tqps_num; i++) {
/*
- * Set gap limiter and rate limiter configuration of queue's
- * interrupt.
+ * Set the gap limiter/rate limiter/quantity limiter algorithm
+ * configuration for the queue's interrupt coalescing.
*/
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+ hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
ret = hns3_bind_ring_with_vector(hw, vec, false,
HNS3_RING_TYPE_TX, i);
pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
hw->num_msi =
- hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
- HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+ hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
+ HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
return 0;
}
if (revision < PCI_REVISION_ID_HIP09_A) {
hns3_set_default_dev_specifications(hw);
+ hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
+ hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
+ hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
return 0;
}
return ret;
}
+ hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
+ hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
+ hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+
return 0;
}
struct hns3_wait_data *wait_data;
};
+#define HNS3_INTR_MAPPING_VEC_RSV_ONE 0
+#define HNS3_INTR_MAPPING_VEC_ALL 1
+
+#define HNS3_INTR_COALESCE_NON_QL 0
+#define HNS3_INTR_COALESCE_QL 1
+
+#define HNS3_INTR_COALESCE_GL_UINT_2US 0
+#define HNS3_INTR_COALESCE_GL_UINT_1US 1
+
+struct hns3_queue_intr {
+ /*
+ * interrupt mapping mode.
+ * value range:
+ * HNS3_INTR_MAPPING_VEC_RSV_ONE/HNS3_INTR_MAPPING_VEC_ALL
+ *
+ * - HNS3_INTR_MAPPING_VEC_RSV_ONE
+ * For some versions of the hardware network engine, because of a
+ * hardware constraint, we have to clear the mapping relationship
+ * configurations by binding all queues to the last interrupt
+ * vector and reserving that vector. This method decreases the
+ * maximum number of queues when upper-layer applications call the
+ * rte_eth_dev_configure API function to enable Rx interrupt.
+ *
+ * - HNS3_INTR_MAPPING_VEC_ALL
+ * The PMD can map/unmap all interrupt vectors to/from queues when
+ * Rx interrupt is enabled.
+ */
+ uint8_t mapping_mode;
+ /*
+ * interrupt coalesce mode.
+ * value range:
+ * HNS3_INTR_COALESCE_NON_QL/HNS3_INTR_COALESCE_QL
+ *
+ * - HNS3_INTR_COALESCE_NON_QL
+ * For some versions of the hardware network engine, the hardware
+ * doesn't support the QL (quantity limiter) algorithm for the
+ * queue's interrupt coalescing.
+ *
+ * - HNS3_INTR_COALESCE_QL
+ * In this mode, the hardware supports the QL (quantity limiter)
+ * algorithm for the queue's interrupt coalescing.
+ */
+ uint8_t coalesce_mode;
+ /*
+ * The unit of the GL (gap limiter) configuration for the queue's
+ * interrupt coalescing.
+ * value range:
+ * HNS3_INTR_COALESCE_GL_UINT_2US/HNS3_INTR_COALESCE_GL_UINT_1US
+ */
+ uint8_t gl_unit;
+};
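For context on how an application reaches the mapping paths described above,
here is a hedged sketch using the standard DPDK ethdev API
(setup_port_with_rx_intr() is a hypothetical helper, not part of the driver).
With intr_conf.rxq set, the PMD has to map queues to interrupt vectors, and
in HNS3_INTR_MAPPING_VEC_RSV_ONE mode one vector stays reserved, so one fewer
queue can get its own Rx interrupt:

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: request per-queue Rx interrupts so the PMD
 * exercises the queue/vector mapping mode selected in hw->intr. */
static int
setup_port_with_rx_intr(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.intr_conf.rxq = 1;	/* enable per-queue Rx interrupts */

	/* On HNS3_INTR_MAPPING_VEC_RSV_ONE hardware the last vector is
	 * reserved, so the usable interrupt queue count is one smaller
	 * than on HNS3_INTR_MAPPING_VEC_ALL hardware. */
	return rte_eth_dev_configure(port_id, nb_queues, nb_queues, &conf);
}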
+
struct hns3_hw {
struct rte_eth_dev_data *data;
void *io_base;
uint32_t capability;
uint32_t max_tm_rate;
+
+ struct hns3_queue_intr intr;
+
uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
struct hns3_port_base_vlan_config port_base_vlan_cfg;
static int
hns3vf_init_ring_with_vector(struct hns3_hw *hw)
{
- uint8_t vec;
+ uint16_t vec;
int ret;
int i;
* vector. In the initialization, clearing all the hardware mapping
* relationship configurations between queues and interrupt vectors is
* needed, so some errors caused by the residual configurations, such as
- * the unexpected Tx interrupt, can be avoid. Because of the hardware
- * constraints in hns3 hardware engine, we have to implement clearing
- * the mapping relationship configurations by binding all queues to the
- * last interrupt vector and reserving the last interrupt vector. This
- * method results in a decrease of the maximum queues when upper
- * applications call the rte_eth_dev_configure API function to enable
- * Rx interrupt.
+ * the unexpected Tx interrupt, can be avoided.
*/
vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
- /* vec - 1: the last interrupt is reserved */
- hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
+ if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
+ vec = vec - 1; /* the last interrupt is reserved */
+ hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
for (i = 0; i < hw->intr_tqps_num; i++) {
/*
- * Set gap limiter and rate limiter configuration of queue's
- * interrupt.
+ * Set the gap limiter/rate limiter/quantity limiter algorithm
+ * configuration for the queue's interrupt coalescing.
*/
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+ hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
ret = hns3vf_bind_ring_with_vector(hw, vec, false,
HNS3_RING_TYPE_TX, i);
if (revision < PCI_REVISION_ID_HIP09_A) {
hns3vf_set_default_dev_specifications(hw);
+ hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
+ hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
+ hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
return 0;
}
return ret;
}
+ hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
+ hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
+ hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+
return 0;
}
req = (struct hns3_vf_res_cmd *)desc.data;
num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
- HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+ HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
if (num_msi < HNS3_MIN_VECTOR_NUM) {
hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
num_msi, HNS3_MIN_VECTOR_NUM);
#define HNS3_TQP_INTR_GL1_REG 0x20200
#define HNS3_TQP_INTR_GL2_REG 0x20300
#define HNS3_TQP_INTR_RL_REG 0x20900
+#define HNS3_TQP_INTR_TX_QL_REG 0x20e00
+#define HNS3_TQP_INTR_RX_QL_REG 0x20f00
#define HNS3_TQP_INTR_REG_SIZE 4
#define HNS3_TQP_INTR_GL_MAX 0x1FE0
#define HNS3_TQP_INTR_GL_DEFAULT 20
+#define HNS3_TQP_INTR_GL_UNIT_1US BIT(31)
#define HNS3_TQP_INTR_RL_MAX 0xEC
#define HNS3_TQP_INTR_RL_ENABLE_MASK 0x40
#define HNS3_TQP_INTR_RL_DEFAULT 0
+#define HNS3_TQP_INTR_QL_DEFAULT 0
/* Convert gl_usec to a hardware count; each written unit represents 2us */
#define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1)
return;
addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
- value = HNS3_GL_USEC_TO_REG(gl_value);
+ if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
+ value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
+ else
+ value = HNS3_GL_USEC_TO_REG(gl_value);
hns3_write_dev(hw, addr, value);
}
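A worked sketch of the two GL encodings above, using the default of 20us: on
2us-unit hardware the value is halved before being written (20 >> 1 == 10),
while on 1us-unit hardware the raw microsecond value is written with BIT(31)
set as the unit-select flag:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1U << (n))
#define HNS3_TQP_INTR_GL_UNIT_1US	BIT(31)
#define HNS3_TQP_INTR_GL_DEFAULT	20
/* Each written unit represents 2us on older hardware. */
#define HNS3_GL_USEC_TO_REG(gl_usec)	((gl_usec) >> 1)

int
main(void)
{
	uint32_t gl_usec = HNS3_TQP_INTR_GL_DEFAULT;

	printf("2us unit: 0x%x\n", HNS3_GL_USEC_TO_REG(gl_usec));
	/* prints 0xa */
	printf("1us unit: 0x%x\n", gl_usec | HNS3_TQP_INTR_GL_UNIT_1US);
	/* prints 0x80000014: bit 31 selects the 1us unit, low bits carry 20 */
	return 0;
}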
hns3_write_dev(hw, addr, value);
}
+void
+hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
+{
+ uint32_t addr;
+
+ if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+ return;
+
+ addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ hns3_write_dev(hw, addr, ql_value);
+
+ addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ hns3_write_dev(hw, addr, ql_value);
+}
+
static void
hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
{
uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
uint16_t rl_value);
+void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
+ uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);