+ return 0;
+}
+
+/*
+ * Iterate over all Rx queues and call the callback() function for each Rx
+ * queue.
+ *
+ * @param[in] dev
+ * The target eth dev.
+ * @param[in] callback
+ * The function to call for each queue. If the callback returns a nonzero
+ * value, the iteration stops and that value is returned to the caller.
+ * @param[in] arg
+ * The argument to pass to the callback function.
+ *
+ * @return
+ * 0 on success, -EINVAL if the Rx queues are not set up, otherwise the
+ * first nonzero value returned by the callback.
+ */
+int
+hns3_rxq_iterate(struct rte_eth_dev *dev,
+ int (*callback)(struct hns3_rx_queue *, void *), void *arg)
+{
+ uint32_t i;
+ int ret;
+
+ if (dev->data->rx_queues == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ret = callback(dev->data->rx_queues[i], arg);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
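+
+/*
+ * Illustrative use of hns3_rxq_iterate() (not part of the driver): the
+ * caller supplies a callback such as the hypothetical checker below, and
+ * the first nonzero return value stops the iteration and is propagated.
+ *
+ *   static int rxq_check_cb(struct hns3_rx_queue *rxq, void *arg)
+ *   {
+ *           RTE_SET_USED(arg);
+ *           return rxq == NULL ? -EINVAL : 0;
+ *   }
+ *
+ *   ret = hns3_rxq_iterate(dev, rxq_check_cb, NULL);
+ */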
+
+static void*
+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *rx_mz;
+ struct hns3_rx_queue *rxq;
+ unsigned int rx_desc;
+
+ rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate rx ring hardware descriptors. */
+ rxq->queue_id = q_info->idx;
+ rxq->nb_rx_desc = q_info->nb_desc;
+
+ /*
+ * Allocate a little more memory because the rx vector functions
+ * don't check boundaries each time.
+ */
+ rx_desc = (rxq->nb_rx_desc + HNS3_DEFAULT_RX_BURST) *
+ sizeof(struct hns3_desc);
+ rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ rx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (rx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ q_info->idx);
+ hns3_rx_queue_release(rxq);
+ return NULL;
+ }
+ rxq->mz = rx_mz;
+ rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
+ rxq->rx_ring_phys_addr = rx_mz->iova;
+
+ hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ rxq->rx_ring_phys_addr);
+
+ return rxq;
+}
+
+static int
+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_rx_queue *rxq;
+ uint16_t nb_rx_q;
+
+ if (hw->fkq_data.rx_queues[idx] != NULL) {
+ hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+ hw->fkq_data.rx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake RX queue";
+ q_info.ring_name = "rx_fake_ring";
+ rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring because upper-layer applications do not use it */
+ rxq->sw_ring = NULL;
+
+ rxq->hns = hns;
+ rxq->rx_deferred_start = false;
+ rxq->port_id = dev->data->port_id;
+ rxq->configured = true;
+ nb_rx_q = dev->data->nb_rx_queues;
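+ /*
+ * The fake Rx queue is presumed to use the TQP register block that
+ * follows the real Rx queues, hence the (nb_rx_q + idx) offset below.
+ */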
+ rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
+ rxq->rx_buf_len = HNS3_MIN_BD_BUF_SIZE;
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.rx_queues[idx] = rxq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static void*
+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *tx_mz;
+ struct hns3_tx_queue *txq;
+ struct hns3_desc *desc;
+ unsigned int tx_desc;
+ int i;
+
+ txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (txq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate tx ring hardware descriptors. */
+ txq->queue_id = q_info->idx;
+ txq->nb_tx_desc = q_info->nb_desc;
+ tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
+ tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ tx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (tx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ q_info->idx);
+ hns3_tx_queue_release(txq);
+ return NULL;
+ }
+ txq->mz = tx_mz;
+ txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
+ txq->tx_ring_phys_addr = tx_mz->iova;
+
+ hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ txq->tx_ring_phys_addr);
+
+ /* Clear tx bd */
+ desc = txq->tx_ring;
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ desc->tx.tp_fe_sc_vld_ra_ri = 0;
+ desc++;
+ }
+
+ return txq;
+}
+
+static int
+hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_tx_queue *txq;
+ uint16_t nb_tx_q;
+
+ if (hw->fkq_data.tx_queues[idx] != NULL) {
+ hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+ hw->fkq_data.tx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake TX queue";
+ q_info.ring_name = "tx_fake_ring";
+ txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
+ if (txq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring because upper-layer applications do not use it */
+ txq->sw_ring = NULL;
+ txq->free = NULL;
+
+ txq->hns = hns;
+ txq->tx_deferred_start = false;
+ txq->port_id = dev->data->port_id;
+ txq->configured = true;
+ nb_tx_q = dev->data->nb_tx_queues;
+ txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.tx_queues[idx] = txq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
+ void **rxq;
+ uint16_t i;
+
+ if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
+ /* first time configuration */
+ uint32_t size;
+ size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
+ hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
+ RTE_CACHE_LINE_SIZE);
+ if (hw->fkq_data.rx_queues == NULL) {
+ hw->fkq_data.nb_fake_rx_queues = 0;
+ return -ENOMEM;
+ }
+ } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
+ /* re-configure */
+ rxq = hw->fkq_data.rx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_rx_queue_release(rxq[i]);
+
+ rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+ memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
+ }
+
+ hw->fkq_data.rx_queues = rxq;
+ } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
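+ /* free all fake Rx queues */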
+ rxq = hw->fkq_data.rx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_rx_queue_release(rxq[i]);
+
+ rte_free(hw->fkq_data.rx_queues);
+ hw->fkq_data.rx_queues = NULL;
+ }
+
+ hw->fkq_data.nb_fake_rx_queues = nb_queues;
+
+ return 0;
+}
+
+static int
+hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
+ void **txq;
+ uint16_t i;
+
+ if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
+ /* first time configuration */
+ uint32_t size;
+ size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
+ hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
+ RTE_CACHE_LINE_SIZE);
+ if (hw->fkq_data.tx_queues == NULL) {
+ hw->fkq_data.nb_fake_tx_queues = 0;
+ return -ENOMEM;
+ }
+ } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
+ /* re-configure */
+ txq = hw->fkq_data.tx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_tx_queue_release(txq[i]);
+ txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+ memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
+ }
+
+ hw->fkq_data.tx_queues = txq;
+ } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
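+ /* free all fake Tx queues */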
+ txq = hw->fkq_data.tx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_tx_queue_release(txq[i]);
+
+ rte_free(hw->fkq_data.tx_queues);
+ hw->fkq_data.tx_queues = NULL;
+ }
+ hw->fkq_data.nb_fake_tx_queues = nb_queues;
+
+ return 0;
+}
+
+int
+hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+ uint16_t nb_tx_q)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t rx_need_add_nb_q;
+ uint16_t tx_need_add_nb_q;
+ uint16_t port_id;
+ uint16_t q;
+ int ret;
+
+ /* Setup new number of fake RX/TX queues and reconfigure device. */
+ hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
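+ /*
+ * Fake queues pad the smaller of the Rx/Tx queue counts up to
+ * cfg_max_queues, so the driver can operate on equal numbers of Rx and
+ * Tx queues even when the application configures unequal counts.
+ */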
+ rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
+ tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
+ ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
+ if (ret) {
+ hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+ goto cfg_fake_rx_q_fail;
+ }
+
+ ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
+ if (ret) {
+ hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+ goto cfg_fake_tx_q_fail;
+ }
+
+ /* Allocate and set up fake RX queue per Ethernet port. */
+ port_id = hw->data->port_id;
+ for (q = 0; q < rx_need_add_nb_q; q++) {
+ ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+ rte_eth_dev_socket_id(port_id));
+ if (ret)
+ goto setup_fake_rx_q_fail;
+ }
+
+ /* Allocate and set up fake TX queue per Ethernet port. */
+ for (q = 0; q < tx_need_add_nb_q; q++) {
+ ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+ rte_eth_dev_socket_id(port_id));
+ if (ret)
+ goto setup_fake_tx_q_fail;
+ }
+
+ return 0;
+
+setup_fake_tx_q_fail:
+setup_fake_rx_q_fail:
+ (void)hns3_fake_tx_queue_config(hw, 0);
+cfg_fake_tx_q_fail:
+ (void)hns3_fake_rx_queue_config(hw, 0);
+cfg_fake_rx_q_fail:
+ hw->cfg_max_queues = 0;
+
+ return ret;
+}
+
+void
+hns3_dev_release_mbufs(struct hns3_adapter *hns)
+{
+ struct rte_eth_dev_data *dev_data = hns->hw.data;
+ struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
+ int i;
+
+ if (dev_data->rx_queues)
+ for (i = 0; i < dev_data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+ hns3_rx_queue_release_mbufs(rxq);
+ }
+
+ if (dev_data->tx_queues)
+ for (i = 0; i < dev_data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_tx_queue_release_mbufs(txq);
+ }
+}
+
+static int
+hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
+{
+ uint16_t vld_buf_size;
+ uint16_t num_hw_specs;
+ uint16_t i;
+
+ /*
+ * The hns3 network engine only supports four typical buffer size
+ * specifications. The buffer size affects the maximum packet length and
+ * the maximum number of segments when HW GRO is enabled on the Rx side.
+ * The relationship between them is as follows:
+ * rx_buf_size | max_gro_pkt_len | max_gro_nb_seg
+ * ---------------------|-------------------|----------------
+ * HNS3_4K_BD_BUF_SIZE | 60KB | 15
+ * HNS3_2K_BD_BUF_SIZE | 62KB | 31
+ * HNS3_1K_BD_BUF_SIZE | 63KB | 63
+ * HNS3_512_BD_BUF_SIZE | 31.5KB | 63
+ */
+ static const uint16_t hw_rx_buf_size[] = {
+ HNS3_4K_BD_BUF_SIZE,
+ HNS3_2K_BD_BUF_SIZE,
+ HNS3_1K_BD_BUF_SIZE,
+ HNS3_512_BD_BUF_SIZE
+ };
+
+ vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
+ return -EINVAL;
+
+ num_hw_specs = RTE_DIM(hw_rx_buf_size);
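+ /*
+ * hw_rx_buf_size[] is ordered from largest to smallest; pick the largest
+ * supported buffer size that fits within the mempool data room.
+ */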
+ for (i = 0; i < num_hw_specs; i++) {
+ if (vld_buf_size >= hw_rx_buf_size[i]) {
+ *rx_buf_len = hw_rx_buf_size[i];
+ break;
+ }
+ }
+ return 0;
+}
+
+static int
+hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp, uint16_t nb_desc,
+ uint16_t *buf_size)
+{