static int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
- uint32_t idx, __attribute__ ((unused)) uint32_t pool)
+ uint32_t idx, __rte_unused uint32_t pool)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
}
static int
-hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
- bool mmap, uint16_t queue_id)
+hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
+ enum hns3_ring_type queue_type, uint16_t queue_id)
{
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct hns3_cmd_desc desc;
struct hns3_ctrl_vector_chain_cmd *req =
(struct hns3_ctrl_vector_chain_cmd *)desc.data;
enum hns3_cmd_status status;
enum hns3_opcode_type op;
uint16_t tqp_type_and_id = 0;
+ const char *op_str;
+ uint16_t type;
+ uint16_t gl;
op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
hns3_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
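+ /* Select the Rx or Tx gap limiter (GL) index that matches the ring type. */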
+ if (queue_type == HNS3_RING_TYPE_RX)
+ gl = HNS3_RING_GL_RX;
+ else
+ gl = HNS3_RING_GL_TX;
+
+ type = queue_type;
+
hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
- HNS3_RING_TYPE_RX);
+ type);
hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
- HNS3_RING_GL_RX);
+ gl);
req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
-
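+ /* Only one TQP is configured in each command. */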
req->int_cause_num = 1;
+ op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
- hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
- queue_id, vector_id, status);
- return -EIO;
+ hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+ op_str, queue_id, req->int_vector_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+hns3_init_ring_with_vector(struct hns3_hw *hw)
+{
+ uint8_t vec;
+ int ret;
+ int i;
+
+ /*
+ * In the hns3 network engine, vector 0 is always the misc interrupt of
+ * this function, and vectors 1~N can be used for the queues of the
+ * function. Tx and Rx queues with the same number share an interrupt
+ * vector. During initialization, all hardware mappings between queues
+ * and interrupt vectors must be cleared, so that errors caused by
+ * residual configurations, such as unexpected Tx interrupts, can be
+ * avoided. Because of hardware constraints in the hns3 network engine,
+ * the mappings are cleared by binding all queues to the last interrupt
+ * vector and reserving that vector. This method reduces the maximum
+ * number of queues available when upper applications call the
+ * rte_eth_dev_configure API function to enable Rx interrupts.
+ */
+ vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
+ hw->intr_tqps_num = vec - 1; /* the last interrupt is reserved */
+ for (i = 0; i < hw->intr_tqps_num; i++) {
+ /*
+ * Set gap limiter and rate limiter configuration of queue's
+ * interrupt.
+ */
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
+ HNS3_TQP_INTR_GL_DEFAULT);
+ hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+
+ ret = hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_TX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
+
+ ret = hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX, i);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
+ "vector: %d, ret=%d", i, vec, ret);
+ return ret;
+ }
}
return 0;
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ uint16_t queue_num = hw->tqps_num;
- info->max_rx_queues = hw->tqps_num;
+ /*
+ * In interrupt mode, 'max_rx_queues' is set based on the number of
+ * MSI-X interrupt resources of the hardware.
+ */
+ if (hw->data->dev_conf.intr_conf.rxq == 1)
+ queue_num = hw->intr_tqps_num;
+
+ info->max_rx_queues = queue_num;
info->max_tx_queues = hw->tqps_num;
info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
info->min_rx_bufsize = hw->rx_buf_len;
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
info->tx_queue_offload_capa);
info->rx_desc_lim = (struct rte_eth_desc_lim) {
struct hns3_pf *pf = &hns->pf;
struct hns3_pf_res_cmd *req;
struct hns3_cmd_desc desc;
+ uint16_t num_msi;
int ret;
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);
pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
- hw->num_msi =
- hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
- HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
+ num_msi = hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
+ HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
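+ /*
+ * At most tqps_num + 1 vectors are usable: one misc interrupt vector
+ * plus one vector per queue pair.
+ */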
+ hw->num_msi = (num_msi > hw->tqps_num + 1) ? hw->tqps_num + 1 : num_msi;
return 0;
}
ret = hns3_cmd_send(hw, &desc, 1);
if (ret)
- PMD_INIT_LOG(ERR, "Set promisc mode fail, status is %d", ret);
+ PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret);
return ret;
}
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
int ret;
rte_spinlock_lock(&hw->lock);
- ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
+ ret = hns3_set_promisc_mode(hw, true, true);
rte_spinlock_unlock(&hw->lock);
if (ret)
- hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
+ hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
+ ret);
return ret;
}
static int
hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
+ bool allmulti = dev->data->all_multicast ? true : false;
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
int ret;
/* If now in all_multicast mode, must remain in all_multicast mode. */
rte_spinlock_lock(&hw->lock);
- ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
+ ret = hns3_set_promisc_mode(hw, false, allmulti);
rte_spinlock_unlock(&hw->lock);
if (ret)
- hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
+ hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
+ ret);
return ret;
}
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
int ret;
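+ /* Promiscuous mode already accepts all multicast packets, so do nothing. */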
+ if (dev->data->promiscuous)
+ return 0;
+
rte_spinlock_lock(&hw->lock);
- ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
+ ret = hns3_set_promisc_mode(hw, false, true);
rte_spinlock_unlock(&hw->lock);
if (ret)
- hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
+ hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
+ ret);
return ret;
}
{
struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
- bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
int ret;
/* If now in promiscuous mode, must remain in all_multicast mode. */
- if (dev->data->promiscuous == 1)
+ if (dev->data->promiscuous)
return 0;
rte_spinlock_lock(&hw->lock);
- ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
+ ret = hns3_set_promisc_mode(hw, false, false);
rte_spinlock_unlock(&hw->lock);
if (ret)
- hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
+ hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
+ ret);
return ret;
}
hns3_dev_promisc_restore(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- bool en_mc_pmc;
- bool en_uc_pmc;
+ bool allmulti = hw->data->all_multicast ? true : false;
- en_uc_pmc = (hw->data->promiscuous == 1) ? true : false;
- en_mc_pmc = (hw->data->all_multicast == 1) ? true : false;
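+ /* In promiscuous mode, all multicast packets are also accepted. */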
+ if (hw->data->promiscuous)
+ return hns3_set_promisc_mode(hw, true, true);
- return hns3_set_promisc_mode(hw, en_uc_pmc, en_mc_pmc);
+ return hns3_set_promisc_mode(hw, false, allmulti);
}
static int
goto err_fdir;
}
+ /*
+ * During initialization, all hardware mappings between queues and
+ * interrupt vectors must be cleared, so that errors caused by residual
+ * configurations, such as unexpected interrupts, can be avoided.
+ */
+ ret = hns3_init_ring_with_vector(hw);
+ if (ret)
+ goto err_fdir;
+
return 0;
err_fdir:
}
if (rte_intr_dp_is_en(intr_handle)) {
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+ ret = hns3_bind_ring_with_vector(hw, vec, true,
+ HNS3_RING_TYPE_RX,
+ q_id);
if (ret)
goto bind_vector_error;
intr_handle->intr_vec[q_id] = vec;
}
if (rte_intr_dp_is_en(intr_handle)) {
for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
- (void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+ (void)hns3_bind_ring_with_vector(hw, vec, false,
+ HNS3_RING_TYPE_RX,
+ q_id);
if (vec < base + intr_handle->nb_efd - 1)
vec++;
}