#define HNS3_TQP_ID_M GENMASK(12, 2)
#define HNS3_INT_GL_IDX_S 13
#define HNS3_INT_GL_IDX_M GENMASK(14, 13)
+#define HNS3_TQP_INT_ID_L_S 0
+#define HNS3_TQP_INT_ID_L_M GENMASK(7, 0)
+#define HNS3_TQP_INT_ID_H_S 8
+#define HNS3_TQP_INT_ID_H_M GENMASK(15, 8)
struct hns3_ctrl_vector_chain_cmd {
- uint8_t int_vector_id;
+ uint8_t int_vector_id; /* the low-order byte of the interrupt id */
uint8_t int_cause_num;
uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD];
uint8_t vfid;
- uint8_t rsv;
+ uint8_t int_vector_id_h; /* the high-order byte of the interrupt id */
};
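The 16-bit vector id is split across two 8-bit fields of the command descriptor. Below is a minimal standalone sketch of that split, assuming hns3_get_field() is the driver's usual mask-and-shift helper; the GENMASK() and hns3_get_field() definitions here are illustrative stand-ins, not the driver's headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the helpers in hns3_ethdev.h (32-bit GENMASK). */
#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))
#define hns3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

#define HNS3_TQP_INT_ID_L_S 0
#define HNS3_TQP_INT_ID_L_M GENMASK(7, 0)
#define HNS3_TQP_INT_ID_H_S 8
#define HNS3_TQP_INT_ID_H_M GENMASK(15, 8)

int
main(void)
{
	uint16_t vector_id = 300; /* 0x012c, needs more than 8 bits */
	uint8_t lo = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
				    HNS3_TQP_INT_ID_L_S);
	uint8_t hi = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
				    HNS3_TQP_INT_ID_H_S);

	/* lo=0x2c, hi=0x01; (hi << 8) | lo recovers the full id. */
	printf("lo=0x%02x hi=0x%02x\n", lo, hi);
	return 0;
}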
struct hns3_config_max_frm_size_cmd {
}
static int
-hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en,
enum hns3_ring_type queue_type, uint16_t queue_id)
{
struct hns3_cmd_desc desc;
enum hns3_cmd_status status;
enum hns3_opcode_type op;
uint16_t tqp_type_and_id = 0;
- const char *op_str;
uint16_t type;
uint16_t gl;
- op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+ op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
hns3_cmd_setup_basic_desc(&desc, op, false);
- req->int_vector_id = vector_id;
+ req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M,
+ HNS3_TQP_INT_ID_L_S);
+ req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M,
+ HNS3_TQP_INT_ID_H_S);
if (queue_type == HNS3_RING_TYPE_RX)
gl = HNS3_RING_GL_RX;
else
gl = HNS3_RING_GL_TX;
hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
gl);
req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
req->int_cause_num = 1;
- op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.",
- op_str, queue_id, req->int_vector_id, status);
+ en ? "Map" : "Unmap", queue_id, vector_id, status);
return status;
}
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
- uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
+ uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
+ uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
uint32_t intr_vector;
uint16_t q_id;
int ret;
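For context, the widened base and vec above feed hns3_bind_ring_with_vector() once per Rx queue later in this function. A rough sketch of that loop's shape, given as an assumption for illustration rather than the driver's exact code:

/* Hypothetical Rx mapping loop: bind each Rx queue to the current
 * vector and advance while spare event fds remain. */
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
	ret = hns3_bind_ring_with_vector(hw, vec, true,
					 HNS3_RING_TYPE_RX, q_id);
	if (ret)
		return ret;
	if (vec < base + intr_handle->nb_efd - 1)
		vec++;
}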
reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t);
for (j = 0; j < hw->intr_tqps_num; j++) {
- reg_offset = HNS3_TQP_INTR_REG_SIZE * j;
+ reg_offset = hns3_get_tqp_intr_reg_offset(j);
for (i = 0; i < reg_num; i++)
*data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] +
reg_offset);
#define HNS3_MIN_EXTEND_QUEUE_ID 1024
/* bar registers for tqp interrupt */
-#define HNS3_TQP_INTR_CTRL_REG 0x20000
-#define HNS3_TQP_INTR_GL0_REG 0x20100
-#define HNS3_TQP_INTR_GL1_REG 0x20200
-#define HNS3_TQP_INTR_GL2_REG 0x20300
-#define HNS3_TQP_INTR_RL_REG 0x20900
-#define HNS3_TQP_INTR_TX_QL_REG 0x20e00
-#define HNS3_TQP_INTR_RX_QL_REG 0x20f00
-
-#define HNS3_TQP_INTR_REG_SIZE 4
+#define HNS3_TQP_INTR_REG_BASE 0x20000
+#define HNS3_TQP_INTR_EXT_REG_BASE 0x30000
+#define HNS3_TQP_INTR_CTRL_REG 0
+#define HNS3_TQP_INTR_GL0_REG 0x100
+#define HNS3_TQP_INTR_GL1_REG 0x200
+#define HNS3_TQP_INTR_GL2_REG 0x300
+#define HNS3_TQP_INTR_RL_REG 0x900
+#define HNS3_TQP_INTR_TX_QL_REG 0xe00
+#define HNS3_TQP_INTR_RX_QL_REG 0xf00
+#define HNS3_TQP_INTR_RL_EN_B 6
+
+#define HNS3_MIN_EXT_TQP_INTR_ID 64
+#define HNS3_TQP_INTR_LOW_ORDER_OFFSET 0x4
+#define HNS3_TQP_INTR_HIGH_ORDER_OFFSET 0x1000
+
#define HNS3_TQP_INTR_GL_MAX 0x1FE0
#define HNS3_TQP_INTR_GL_DEFAULT 20
#define HNS3_TQP_INTR_GL_UNIT_1US BIT(31)
return ret;
}
+uint32_t
+hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id)
+{
+ uint32_t reg_offset;
+
+ /* An extended offset is needed to configure a TQP interrupt with id >= 64 */
+ if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID)
+ reg_offset = HNS3_TQP_INTR_REG_BASE +
+ tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+ else
+ reg_offset = HNS3_TQP_INTR_EXT_REG_BASE +
+ tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
+ tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID *
+ HNS3_TQP_INTR_LOW_ORDER_OFFSET;
+
+ return reg_offset;
+}
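A quick worked check of the two-region mapping, duplicating the arithmetic above in standalone form (define values copied from this patch):

#include <assert.h>
#include <stdint.h>

#define HNS3_TQP_INTR_REG_BASE 0x20000
#define HNS3_TQP_INTR_EXT_REG_BASE 0x30000
#define HNS3_MIN_EXT_TQP_INTR_ID 64
#define HNS3_TQP_INTR_LOW_ORDER_OFFSET 0x4
#define HNS3_TQP_INTR_HIGH_ORDER_OFFSET 0x1000

/* Same arithmetic as hns3_get_tqp_intr_reg_offset() above. */
static uint32_t
tqp_intr_reg_offset(uint16_t id)
{
	if (id < HNS3_MIN_EXT_TQP_INTR_ID)
		return HNS3_TQP_INTR_REG_BASE +
		       id * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
	return HNS3_TQP_INTR_EXT_REG_BASE +
	       id / HNS3_MIN_EXT_TQP_INTR_ID * HNS3_TQP_INTR_HIGH_ORDER_OFFSET +
	       id % HNS3_MIN_EXT_TQP_INTR_ID * HNS3_TQP_INTR_LOW_ORDER_OFFSET;
}

int
main(void)
{
	assert(tqp_intr_reg_offset(3) == 0x2000c);   /* 0x20000 + 3 * 4 */
	assert(tqp_intr_reg_offset(64) == 0x31000);  /* first extended vector */
	assert(tqp_intr_reg_offset(100) == 0x31090); /* 0x31000 + 36 * 4 */
	return 0;
}

So interrupt id 64 jumps from the legacy 0x20000 region to 0x30000 + 0x1000, and each further block of 64 extended ids advances the high-order stride by another 0x1000.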
void
hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
return;
- addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id);
if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
else
if (rl_value > HNS3_TQP_INTR_RL_MAX)
return;
- addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = HNS3_RL_USEC_TO_REG(rl_value);
if (value > 0)
value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE)
return;
- addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
- addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
hns3_write_dev(hw, addr, ql_value);
}
{
uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+ addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id);
value = en ? 1 : 0;
hns3_write_dev(hw, addr, value);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,