bool gro_en;
int ret;
+ hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
+
/*
- * Hardware does not support individually enable/disable/reset the Tx or
- * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
- * and Rx queues at the same time. When the numbers of Tx queues
- * allocated by upper applications are not equal to the numbers of Rx
- * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
- * of Tx/Rx queues. otherwise, network engine can not work as usual. But
- * these fake queues are imperceptible, and can not be used by upper
- * applications.
+	 * Some versions of the hardware network engine do not support
+	 * individually enabling/disabling/resetting the Tx or Rx queue. These
+	 * devices must enable/disable/reset Tx and Rx queues at the same time.
+	 * When the number of Tx queues allocated by upper applications is not
+	 * equal to the number of Rx queues, the driver needs to set up fake Tx
+	 * or Rx queues to adjust the number of Tx/Rx queues; otherwise, the
+	 * network engine cannot work as usual. These fake queues are invisible
+	 * to upper applications and cannot be used by them.
*/
- ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
- if (ret) {
- hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
- return ret;
+ if (!hns3_dev_indep_txrx_supported(hw)) {
+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.",
+ ret);
+ return ret;
+ }
}
hw->adapter_state = HNS3_NIC_CONFIGURING;
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
+ if (hns3_dev_indep_txrx_supported(hw))
+ info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
.nb_min = HNS3_MIN_RING_DESC,
if (ret)
return ret;
- /* Enable queues */
- ret = hns3_start_queues(hns, reset_queue);
+ ret = hns3_init_queues(hns, reset_queue);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
+ PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
return ret;
}
- /* Enable MAC */
ret = hns3_cfg_mac_mode(hw, true);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
+ PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret);
goto err_config_mac_mode;
}
return 0;
err_config_mac_mode:
- hns3_stop_queues(hns, true);
+ hns3_dev_release_mbufs(hns);
+ hns3_reset_all_tqps(hns);
return ret;
}
return ret;
}
+ /*
+	 * There are three registers used to control the status of a TQP
+	 * (which contains a pair of Tx and Rx queues) in the new version of
+	 * the network engine. One is used to control the enabling of the Tx
+	 * queue, another controls the enabling of the Rx queue, and the last
+	 * is the master switch that controls the enabling of the whole TQP.
+	 * The Tx register and the TQP register must both be enabled to enable
+	 * a Tx queue. The same applies to the Rx queue. For the older network
+	 * engine, this function only refreshes the enabled flag, which is used
+	 * to update the status of the queue in the DPDK framework.
+ */
+ ret = hns3_start_all_txqs(dev);
+ if (ret) {
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ ret = hns3_start_all_rxqs(dev);
+ if (ret) {
+ hns3_stop_all_txqs(dev);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
/* Enable interrupt of all rx queues before enabling queues */
hns3_dev_all_rx_queue_intr_enable(hw, true);
+
/*
- * When finished the initialization, enable queues to receive/transmit
- * packets.
+	 * After the initialization is finished, enable the TQPs to
+	 * receive/transmit packets and refresh the status of all queues.
*/
- hns3_enable_all_queues(hw, true);
+ hns3_start_tqps(hw);
hns3_info(hw, "hns3 dev start successful!");
return 0;
hns3_do_stop(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- bool reset_queue;
int ret;
ret = hns3_cfg_mac_mode(hw, false);
if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
hns3_configure_all_mac_addr(hns, true);
- reset_queue = true;
- } else
- reset_queue = false;
+ ret = hns3_reset_all_tqps(hns);
+ if (ret) {
+ hns3_err(hw, "failed to reset all queues ret = %d.",
+ ret);
+ return ret;
+ }
+ }
hw->mac.default_addr_setted = false;
- return hns3_stop_queues(hns, reset_queue);
+ return 0;
}
static void
rte_spinlock_lock(&hw->lock);
if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ hns3_stop_tqps(hw);
hns3_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
hns3_dev_release_mbufs(hns);
return ret;
}
- ret = hns3_reset_all_queues(hns);
+ ret = hns3_reset_all_tqps(hns);
if (ret) {
hns3_err(hw, "Failed to reset all queues: %d", ret);
return ret;
rte_spinlock_lock(&hw->lock);
if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
hw->adapter_state == HNS3_NIC_STOPPING) {
+ hns3_enable_all_queues(hw, false);
hns3_do_stop(hns);
hw->reset.mbuf_deferred_free = true;
} else
.tx_queue_setup = hns3_tx_queue_setup,
.rx_queue_release = hns3_dev_rx_queue_release,
.tx_queue_release = hns3_dev_tx_queue_release,
+ .rx_queue_start = hns3_dev_rx_queue_start,
+ .rx_queue_stop = hns3_dev_rx_queue_stop,
+ .tx_queue_start = hns3_dev_tx_queue_start,
+ .tx_queue_stop = hns3_dev_tx_queue_stop,
.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
.rxq_info_get = hns3_rxq_info_get,
bool gro_en;
int ret;
+ hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
+
/*
- * Hardware does not support individually enable/disable/reset the Tx or
- * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx
- * and Rx queues at the same time. When the numbers of Tx queues
- * allocated by upper applications are not equal to the numbers of Rx
- * queues, driver needs to setup fake Tx or Rx queues to adjust numbers
- * of Tx/Rx queues. otherwise, network engine can not work as usual. But
- * these fake queues are imperceptible, and can not be used by upper
- * applications.
+	 * Some versions of the hardware network engine do not support
+	 * individually enabling/disabling/resetting the Tx or Rx queue. These
+	 * devices must enable/disable/reset Tx and Rx queues at the same time.
+	 * When the number of Tx queues allocated by upper applications is not
+	 * equal to the number of Rx queues, the driver needs to set up fake Tx
+	 * or Rx queues to adjust the number of Tx/Rx queues; otherwise, the
+	 * network engine cannot work as usual. These fake queues are invisible
+	 * to upper applications and cannot be used by them.
*/
- ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
- if (ret) {
- hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret);
- return ret;
+ if (!hns3_dev_indep_txrx_supported(hw)) {
+ ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
+ if (ret) {
+ hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.",
+ ret);
+ return ret;
+ }
}
hw->adapter_state = HNS3_NIC_CONFIGURING;
DEV_TX_OFFLOAD_MBUF_FAST_FREE |
hns3_txvlan_cap_get(hw));
+ if (hns3_dev_indep_txrx_supported(hw))
+ info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = HNS3_MAX_RING_DESC,
.nb_min = HNS3_MIN_RING_DESC,
hns3vf_do_stop(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- bool reset_queue;
+ int ret;
hw->mac.link_status = ETH_LINK_DOWN;
if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) {
hns3vf_configure_mac_addr(hns, true);
- reset_queue = true;
- } else
- reset_queue = false;
- return hns3_stop_queues(hns, reset_queue);
+ ret = hns3_reset_all_tqps(hns);
+ if (ret) {
+ hns3_err(hw, "failed to reset all queues ret = %d",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
}
static void
rte_spinlock_lock(&hw->lock);
if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+ hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3vf_unmap_rx_interrupt(dev);
hns3_dev_release_mbufs(hns);
if (ret)
return ret;
- ret = hns3_start_queues(hns, reset_queue);
+ ret = hns3_init_queues(hns, reset_queue);
if (ret)
- hns3_err(hw, "Failed to start queues: %d", ret);
+ hns3_err(hw, "failed to init queues, ret = %d.", ret);
return ret;
}
rte_spinlock_unlock(&hw->lock);
return ret;
}
+
+ /*
+	 * There are three registers used to control the status of a TQP
+	 * (which contains a pair of Tx and Rx queues) in the new version of
+	 * the network engine. One is used to control the enabling of the Tx
+	 * queue, another controls the enabling of the Rx queue, and the last
+	 * is the master switch that controls the enabling of the whole TQP.
+	 * The Tx register and the TQP register must both be enabled to enable
+	 * a Tx queue. The same applies to the Rx queue. For the older network
+	 * engine, this function only refreshes the enabled flag, which is used
+	 * to update the status of the queue in the DPDK framework.
+ */
+ ret = hns3_start_all_txqs(dev);
+ if (ret) {
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
+ ret = hns3_start_all_rxqs(dev);
+ if (ret) {
+ hns3_stop_all_txqs(dev);
+ hw->adapter_state = HNS3_NIC_CONFIGURED;
+ rte_spinlock_unlock(&hw->lock);
+ return ret;
+ }
+
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
/* Enable interrupt of all rx queues before enabling queues */
hns3_dev_all_rx_queue_intr_enable(hw, true);
+
/*
- * When finished the initialization, enable queues to receive/transmit
- * packets.
+	 * After the initialization is finished, start all TQPs to
+	 * receive/transmit packets and refresh the status of all queues.
*/
- hns3_enable_all_queues(hw, true);
+ hns3_start_tqps(hw);
return ret;
}
rte_spinlock_lock(&hw->lock);
if (hw->adapter_state == HNS3_NIC_STARTED ||
hw->adapter_state == HNS3_NIC_STOPPING) {
+ hns3_enable_all_queues(hw, false);
hns3vf_do_stop(hns);
hw->reset.mbuf_deferred_free = true;
} else
rte_intr_enable(&pci_dev->intr_handle);
}
- ret = hns3_reset_all_queues(hns);
+ ret = hns3_reset_all_tqps(hns);
if (ret) {
hns3_err(hw, "Failed to reset all queues: %d", ret);
return ret;
.tx_queue_setup = hns3_tx_queue_setup,
.rx_queue_release = hns3_dev_rx_queue_release,
.tx_queue_release = hns3_dev_tx_queue_release,
+ .rx_queue_start = hns3_dev_rx_queue_start,
+ .rx_queue_stop = hns3_dev_rx_queue_stop,
+ .tx_queue_start = hns3_dev_tx_queue_start,
+ .tx_queue_stop = hns3_dev_tx_queue_stop,
.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
.rxq_info_get = hns3_rxq_info_get,
{
uint16_t i;
- /* Note: Fake rx queue will not enter here */
+ /* Note: Fake tx queue will not enter here */
if (txq->sw_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
if (txq->sw_ring[i].mbuf) {
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
uint32_t rcb_reg;
+ void *tqp_base;
int i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- if (i < nb_rx_q)
- rxq = hw->data->rx_queues[i];
- else
- rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
- if (i < nb_tx_q)
- txq = hw->data->tx_queues[i];
- else
- txq = hw->fkq_data.tx_queues[i - nb_tx_q];
- if (rxq == NULL || txq == NULL ||
- (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
- continue;
+ if (hns3_dev_indep_txrx_supported(hw)) {
+ rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
+ txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+ /*
+			 * After initialization, rxq and txq won't both be NULL.
+ */
+ if (rxq != NULL)
+ tqp_base = rxq->io_base;
+ else if (txq != NULL)
+ tqp_base = txq->io_base;
+ else
+ return;
+ } else {
+ rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
+ hw->fkq_data.rx_queues[i - nb_rx_q];
- rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
+ tqp_base = rxq->io_base;
+ }
+ /*
+		 * This is the master switch used to control the enabling of a
+		 * pair of Tx and Rx queues. Both the Rx and Tx queues point to
+		 * the same register.
+ */
+ rcb_reg = hns3_read_reg(tqp_base, HNS3_RING_EN_REG);
if (en)
rcb_reg |= BIT(HNS3_RING_EN_B);
else
rcb_reg &= ~BIT(HNS3_RING_EN_B);
- hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg);
+ hns3_write_reg(tqp_base, HNS3_RING_EN_REG, rcb_reg);
+ }
+}
+
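+/*
+ * Enable or disable a single Tx queue. On hardware with independent Tx/Rx
+ * queue control this updates the per-queue HNS3_RING_TX_EN_REG register;
+ * on older hardware only the software 'enabled' flag is refreshed.
+ */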
+static void
+hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
+{
+ struct hns3_hw *hw = &txq->hns->hw;
+ uint32_t reg;
+
+ if (hns3_dev_indep_txrx_supported(hw)) {
+ reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
+ if (en)
+ reg |= BIT(HNS3_RING_EN_B);
+ else
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(txq, HNS3_RING_TX_EN_REG, reg);
+ }
+ txq->enabled = en;
+}
+
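+/*
+ * Enable or disable a single Rx queue. On hardware with independent Tx/Rx
+ * queue control this updates the per-queue HNS3_RING_RX_EN_REG register;
+ * on older hardware only the software 'enabled' flag is refreshed.
+ */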
+static void
+hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
+{
+ struct hns3_hw *hw = &rxq->hns->hw;
+ uint32_t reg;
+
+ if (hns3_dev_indep_txrx_supported(hw)) {
+ reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
+ if (en)
+ reg |= BIT(HNS3_RING_EN_B);
+ else
+ reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(rxq, HNS3_RING_RX_EN_REG, reg);
+ }
+ rxq->enabled = en;
+}
+
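+/*
+ * Apply the per-queue Tx enable state for all configured Tx queues. Queues
+ * marked with tx_deferred_start are left disabled; on failure, all queues
+ * enabled so far are disabled again.
+ */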
+int
+hns3_start_all_txqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq;
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (!txq) {
+ hns3_err(hw, "Tx queue %u not available or setup.", i);
+ goto start_txqs_fail;
+ }
+ /*
+		 * A Tx queue is enabled by default. Therefore, it needs to be
+		 * disabled when deferred_start is set. There is another master
+		 * switch used to control the enabling of a pair of Tx and Rx
+		 * queues, and that master switch is disabled by default.
+ */
+ if (txq->tx_deferred_start)
+ hns3_enable_txq(txq, false);
+ else
+ hns3_enable_txq(txq, true);
+ }
+ return 0;
+
+start_txqs_fail:
+ for (j = 0; j < i; j++) {
+ txq = hw->data->tx_queues[j];
+ hns3_enable_txq(txq, false);
+ }
+ return -EINVAL;
+}
+
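+/*
+ * Apply the per-queue Rx enable state for all configured Rx queues. Queues
+ * marked with rx_deferred_start are left disabled; on failure, all queues
+ * enabled so far are disabled again.
+ */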
+int
+hns3_start_all_rxqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq;
+ uint16_t i, j;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (!rxq) {
+ hns3_err(hw, "Rx queue %u not available or setup.", i);
+ goto start_rxqs_fail;
+ }
+ /*
+		 * An Rx queue is enabled by default. Therefore, it needs to be
+		 * disabled when deferred_start is set. There is another master
+		 * switch used to control the enabling of a pair of Tx and Rx
+		 * queues, and that master switch is disabled by default.
+ */
+ if (rxq->rx_deferred_start)
+ hns3_enable_rxq(rxq, false);
+ else
+ hns3_enable_rxq(rxq, true);
+ }
+ return 0;
+
+start_rxqs_fail:
+ for (j = 0; j < i; j++) {
+ rxq = hw->data->rx_queues[j];
+ hns3_enable_rxq(rxq, false);
+ }
+ return -EINVAL;
+}
+
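+/* Disable every configured Tx queue. */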
+void
+hns3_stop_all_txqs(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (!txq)
+ continue;
+ hns3_enable_txq(txq, false);
}
}
req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = rte_cpu_to_le_16(queue_id);
hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
-
ret = hns3_cmd_send(hw, &desc, 1);
if (ret)
- hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret);
+ hns3_err(hw, "send tqp reset cmd error, queue_id = %u, "
+ "ret = %d", queue_id, ret);
return ret;
}
static int
-hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id)
+hns3_get_tqp_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+ uint8_t *reset_status)
{
struct hns3_reset_tqp_queue_cmd *req;
struct hns3_cmd_desc desc;
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Get reset status error, ret =%d", ret);
+ hns3_err(hw, "get tqp reset status error, queue_id = %u, "
+ "ret = %d.", queue_id, ret);
return ret;
}
-
- return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ return ret;
}
static int
-hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
+hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS 200
+ uint8_t reset_status;
uint64_t end;
- int reset_status;
int ret;
ret = hns3_tqp_enable(hw, queue_id, false);
hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
return ret;
}
- ret = -ETIMEDOUT;
end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
do {
/* Wait for tqp hw reset */
rte_delay_ms(HNS3_POLL_RESPONE_MS);
- reset_status = hns3_get_reset_status(hw, queue_id);
- if (reset_status) {
- ret = 0;
+ ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
+ if (ret)
+ goto tqp_reset_fail;
+
+ if (reset_status)
break;
- }
} while (get_timeofday_ms() < end);
- if (ret) {
- hns3_err(hw, "Reset TQP fail, ret = %d", ret);
- return ret;
+ if (!reset_status) {
+ ret = -ETIMEDOUT;
+ hns3_err(hw, "reset tqp timeout, queue_id = %u, ret = %d",
+ queue_id, ret);
+ goto tqp_reset_fail;
}
ret = hns3_send_reset_tqp_cmd(hw, queue_id, false);
hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret);
return ret;
+
+tqp_reset_fail:
+ hns3_send_reset_tqp_cmd(hw, queue_id, false);
+ return ret;
}
static int
memcpy(msg_data, &queue_id, sizeof(uint16_t));
- return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
sizeof(msg_data), true, NULL, 0);
+ if (ret)
+ hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.",
+ queue_id, ret);
+ return ret;
}
static int
-hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id)
+hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
{
struct hns3_hw *hw = &hns->hw;
+
if (hns->is_vf)
return hns3vf_reset_tqp(hw, queue_id);
else
- return hns3_reset_tqp(hw, queue_id);
+ return hns3pf_reset_tqp(hw, queue_id);
}
int
-hns3_reset_all_queues(struct hns3_adapter *hns)
+hns3_reset_all_tqps(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
int ret, i;
for (i = 0; i < hw->cfg_max_queues; i++) {
- ret = hns3_reset_queue(hns, i);
+ ret = hns3_reset_tqp(hns, i);
if (ret) {
hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
return ret;
return 0;
}
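+/*
+ * Assert or deassert the reset of a single Tx or Rx queue through the
+ * HNS3_OPC_RESET_TQP_QUEUE_INDEP command, which carries the queue id and
+ * the queue direction (0 for Tx, 1 for Rx).
+ */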
+static int
+hns3_send_reset_queue_cmd(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type, bool enable)
+{
+ struct hns3_reset_tqp_queue_cmd *req;
+ struct hns3_cmd_desc desc;
+ int queue_direction;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, false);
+
+ req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
+ queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+ req->queue_direction = rte_cpu_to_le_16(queue_direction);
+ hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret)
+ hns3_err(hw, "send queue reset cmd error, queue_id = %u, "
+ "queue_type = %s, ret = %d.", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+ return ret;
+}
+
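+/* Query whether the given Tx or Rx queue has finished its hardware reset. */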
+static int
+hns3_get_queue_reset_status(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type,
+ uint8_t *reset_status)
+{
+ struct hns3_reset_tqp_queue_cmd *req;
+ struct hns3_cmd_desc desc;
+ int queue_direction;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE_INDEP, true);
+
+ req = (struct hns3_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = rte_cpu_to_le_16(queue_id);
+ queue_direction = queue_type == HNS3_RING_TYPE_TX ? 0 : 1;
+ req->queue_direction = rte_cpu_to_le_16(queue_direction);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "get queue reset status error, queue_id = %u "
+ "queue_type = %s, ret = %d.", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx", ret);
+ return ret;
+ }
+
+ *reset_status = hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B);
+ return ret;
+}
+
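+/*
+ * Reset a single Tx or Rx queue: disable the queue, assert the reset, poll
+ * the reset status for up to HNS3_QUEUE_RESET_TRY_MS, then deassert the
+ * reset (also on failure, to leave the queue in a sane state).
+ */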
+static int
+hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id,
+ enum hns3_ring_type queue_type)
+{
+#define HNS3_QUEUE_RESET_TRY_MS 200
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint32_t reset_wait_times;
+ uint32_t max_wait_times;
+ uint8_t reset_status;
+ int ret;
+
+ if (queue_type == HNS3_RING_TYPE_TX) {
+ txq = hw->data->tx_queues[queue_id];
+ hns3_enable_txq(txq, false);
+ } else {
+ rxq = hw->data->rx_queues[queue_id];
+ hns3_enable_rxq(rxq, false);
+ }
+
+ ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, true);
+ if (ret) {
+ hns3_err(hw, "send reset queue cmd fail, ret = %d.", ret);
+ return ret;
+ }
+
+ reset_wait_times = 0;
+ max_wait_times = HNS3_QUEUE_RESET_TRY_MS / HNS3_POLL_RESPONE_MS;
+ while (reset_wait_times < max_wait_times) {
+ /* Wait for queue hw reset */
+ rte_delay_ms(HNS3_POLL_RESPONE_MS);
+ ret = hns3_get_queue_reset_status(hw, queue_id,
+ queue_type, &reset_status);
+ if (ret)
+ goto queue_reset_fail;
+
+ if (reset_status)
+ break;
+ reset_wait_times++;
+ }
+
+ if (!reset_status) {
+ hns3_err(hw, "reset queue timeout, queue_id = %u, "
+ "queue_type = %s", queue_id,
+ queue_type == HNS3_RING_TYPE_TX ? "Tx" : "Rx");
+ ret = -ETIMEDOUT;
+ goto queue_reset_fail;
+ }
+
+ ret = hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+ if (ret)
+ hns3_err(hw, "deassert queue reset fail, ret = %d.", ret);
+
+ return ret;
+
+queue_reset_fail:
+ hns3_send_reset_queue_cmd(hw, queue_id, queue_type, false);
+ return ret;
+}
+
void
hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
uint8_t gl_idx, uint16_t gl_value)
}
static int
-hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_rxq(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
if (ret) {
- hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
+ hns3_err(hw, "fail to alloc mbuf for Rx queue %u, ret = %d.",
idx, ret);
return ret;
}
}
static void
-hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_init_fake_rxq(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
}
static void
-hns3_init_tx_queue(struct hns3_tx_queue *queue)
+hns3_init_txq(struct hns3_tx_queue *txq)
{
- struct hns3_tx_queue *txq = queue;
struct hns3_desc *desc;
int i;
hns3_init_tx_queue_hw(txq);
}
-static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
- struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
-
- txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
- hns3_init_tx_queue(txq);
-}
-
-static void
-hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
-{
- struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
-
- txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
- hns3_init_tx_queue(txq);
-}
-
static void
hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
}
static int
-hns3_start_rx_queues(struct hns3_adapter *hns)
+hns3_init_rx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_rx_queue *rxq;
- int i, j;
+ uint16_t i, j;
int ret;
/* Initialize RSS for queues */
ret = hns3_config_rss(hns);
if (ret) {
- hns3_err(hw, "Failed to configure rss %d", ret);
+ hns3_err(hw, "failed to configure rss, ret = %d.", ret);
return ret;
}
for (i = 0; i < hw->data->nb_rx_queues; i++) {
rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ if (!rxq) {
+ hns3_err(hw, "Rx queue %u not available or setup.", i);
+ goto out;
+ }
+
+ if (rxq->rx_deferred_start)
continue;
- ret = hns3_dev_rx_queue_start(hns, i);
+
+ ret = hns3_init_rxq(hns, i);
if (ret) {
- hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
+ hns3_err(hw, "failed to init Rx queue %u, ret = %d.", i,
ret);
goto out;
}
}
- for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
- rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
- continue;
- hns3_fake_rx_queue_start(hns, i);
- }
+ for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++)
+ hns3_init_fake_rxq(hns, i);
+
return 0;
out:
return ret;
}
-static void
-hns3_start_tx_queues(struct hns3_adapter *hns)
+static int
+hns3_init_tx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
struct hns3_tx_queue *txq;
- int i;
+ uint16_t i;
for (i = 0; i < hw->data->nb_tx_queues; i++) {
txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ if (!txq) {
+ hns3_err(hw, "Tx queue %u not available or setup.", i);
+ return -EINVAL;
+ }
+
+ if (txq->tx_deferred_start)
continue;
- hns3_dev_tx_queue_start(hns, i);
+ hns3_init_txq(txq);
}
for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
- continue;
- hns3_fake_tx_queue_start(hns, i);
+ hns3_init_txq(txq);
}
-
hns3_init_tx_ring_tc(hns);
+
+ return 0;
}
/*
- * Start all queues.
- * Note: just init and setup queues, and don't enable queue rx&tx.
+ * Init all queues.
+ * Note: just init and set up the queues; do not enable the TQPs.
*/
int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
{
struct hns3_hw *hw = &hns->hw;
int ret;
if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
+ ret = hns3_reset_all_tqps(hns);
if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
+ hns3_err(hw, "failed to reset all queues, ret = %d.",
+ ret);
return ret;
}
}
- ret = hns3_start_rx_queues(hns);
+ ret = hns3_init_rx_queues(hns);
if (ret) {
- hns3_err(hw, "Failed to start rx queues: %d", ret);
+ hns3_err(hw, "failed to init rx queues, ret = %d.", ret);
return ret;
}
- hns3_start_tx_queues(hns);
+ ret = hns3_init_tx_queues(hns);
+ if (ret) {
+ hns3_dev_release_mbufs(hns);
+ hns3_err(hw, "failed to init tx queues, ret = %d.", ret);
+ }
- return 0;
+ return ret;
}
-int
-hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
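+/*
+ * Turn on the master switch of all TQPs and mark every queue whose
+ * per-queue enable flag is set as started in the DPDK queue state arrays.
+ */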
+void
+hns3_start_tqps(struct hns3_hw *hw)
{
- struct hns3_hw *hw = &hns->hw;
- int ret;
+ struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+ uint16_t i;
- hns3_enable_all_queues(hw, false);
- if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
- if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
- return ret;
- }
+ hns3_enable_all_queues(hw, true);
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = hw->data->tx_queues[i];
+ if (txq->enabled)
+ hw->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
}
- return 0;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ rxq = hw->data->rx_queues[i];
+ if (rxq->enabled)
+ hw->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+}
+
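+/* Turn off the master switch of all TQPs and mark every queue as stopped. */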
+void
+hns3_stop_tqps(struct hns3_hw *hw)
+{
+ uint16_t i;
+
+ hns3_enable_all_queues(hw, false);
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++)
+ hw->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ for (i = 0; i < hw->data->nb_rx_queues; i++)
+ hw->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
/*
int ret;
/* Setup new number of fake RX/TX queues and reconfigure device. */
- hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
if (ret) {
hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
- goto cfg_fake_rx_q_fail;
+ return ret;
}
ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
(void)hns3_fake_tx_queue_config(hw, 0);
cfg_fake_tx_q_fail:
(void)hns3_fake_rx_queue_config(hw, 0);
-cfg_fake_rx_q_fail:
- hw->cfg_max_queues = 0;
return ret;
}
if (dev_data->rx_queues)
for (i = 0; i < dev_data->nb_rx_queues; i++) {
rxq = dev_data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ if (rxq == NULL)
continue;
hns3_rx_queue_release_mbufs(rxq);
}
if (dev_data->tx_queues)
for (i = 0; i < dev_data->nb_tx_queues; i++) {
txq = dev_data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ if (txq == NULL)
continue;
hns3_tx_queue_release_mbufs(txq);
}
return 0;
}
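+/*
+ * Extra checks that only apply when an Rx queue is set up at runtime,
+ * i.e. after the port has already been started.
+ */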
+static int
+hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
+ uint16_t nb_desc)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+ struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ uint16_t min_vec_bds;
+
+ /*
+	 * The HNS3 hardware network engine sets scattered mode as the default.
+	 * If the driver does not work in scattered mode, packets larger than
+	 * buf_size but smaller than max_rx_pkt_len will be distributed across
+	 * multiple BDs, and the driver cannot handle this situation.
+ */
+ if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
+ hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
+ "than rx_buf_len if scattered is off.");
+ return -EINVAL;
+ }
+
+ if (pkt_burst == hns3_recv_pkts_vec) {
+ min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH +
+ HNS3_DEFAULT_RX_BURST;
+ if (nb_desc < min_vec_bds ||
+ nb_desc % HNS3_DEFAULT_RXQ_REARM_THRESH) {
+ hns3_err(hw, "if Rx burst mode is vector, "
+ "number of descriptor is required to be "
+ "bigger than min vector bds:%u, and could be "
+ "divided by rxq rearm thresh:%u.",
+ min_vec_bds, HNS3_DEFAULT_RXQ_REARM_THRESH);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int
hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp, uint16_t nb_desc,
uint16_t *buf_size)
{
+ int ret;
+
if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||
nb_desc % HNS3_ALIGN_RING_DESC) {
hns3_err(hw, "Number (%u) of rx descriptors is invalid",
return -EINVAL;
}
+ if (hw->data->dev_started) {
+ ret = hns3_rxq_conf_runtime_check(hw, *buf_size, nb_desc);
+ if (ret) {
+ hns3_err(hw, "Rx queue runtime setup fail.");
+ return ret;
+ }
+ }
+
return 0;
}
int rx_entry_len;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "rx_queue_setup after dev_start no supported");
- return -EINVAL;
- }
-
ret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);
if (ret)
return ret;
rxq->mb_pool = mp;
rxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?
conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
+
rxq->rx_deferred_start = conf->rx_deferred_start;
+ if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ hns3_warn(hw, "deferred start is not supported.");
+ rxq->rx_deferred_start = false;
+ }
rx_entry_len = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
sizeof(struct hns3_entry);
int tx_entry_len;
int ret;
- if (dev->data->dev_started) {
- hns3_err(hw, "tx_queue_setup after dev_start no supported");
- return -EINVAL;
- }
-
ret = hns3_tx_queue_conf_check(hw, conf, nb_desc,
&tx_rs_thresh, &tx_free_thresh, idx);
if (ret)
}
txq->tx_deferred_start = conf->tx_deferred_start;
+ if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+ hns3_warn(hw, "deferred start is not supported.");
+ txq->tx_deferred_start = false;
+ }
+
tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
RTE_CACHE_LINE_SIZE, socket_id);
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
+
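+/* The .rx_queue_start ops: reset, re-initialize and enable one Rx queue. */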
+int
+hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ int ret;
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ return ret;
+ }
+
+ ret = hns3_init_rxq(hns, rx_queue_id);
+ if (ret) {
+ hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
+ rx_queue_id, ret);
+ return ret;
+ }
+
+ hns3_enable_rxq(rxq, true);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
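+/* The .rx_queue_stop ops: disable one Rx queue and release its mbufs. */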
+int
+hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ hns3_enable_rxq(rxq, false);
+ hns3_rx_queue_release_mbufs(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
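+/* The .tx_queue_start ops: reset, re-initialize and enable one Tx queue. */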
+int
+hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+ int ret;
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
+ if (ret) {
+ hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
+ tx_queue_id, ret);
+ return ret;
+ }
+
+ hns3_init_txq(txq);
+ hns3_enable_txq(txq, true);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
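+/* The .tx_queue_stop ops: disable one Tx queue and release its mbufs. */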
+int
+hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
+ if (!hns3_dev_indep_txrx_supported(hw))
+ return -ENOTSUP;
+
+ hns3_enable_txq(txq, false);
+ hns3_tx_queue_release_mbufs(txq);
+ /*
+	 * All the mbufs in sw_ring are released and all the pointers in
+	 * sw_ring are set to NULL. If this queue is still used by the upper
+	 * layer, the residual SW state of this txq may cause the NULL pointers
+	 * in sw_ring to be released again. To avoid this, reinitialize the txq.
+ */
+ hns3_init_txq(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}