}
void
-hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)
+hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
+{
+ uint16_t rx_qnum_per_tc;
+
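+ /* Spread Rx queues evenly across the TCs, bounded by the hardware RSS size limit. */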
+ rx_qnum_per_tc = nb_rx_q / hw->num_tc;
+ rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);
+ if (hw->alloc_rss_size != rx_qnum_per_tc) {
+ hns3_info(hw, "rss size changes from %u to %u",
+ hw->alloc_rss_size, rx_qnum_per_tc);
+ hw->alloc_rss_size = rx_qnum_per_tc;
+ }
+ hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;
+}
+
+void
+hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)
{
struct hns3_tc_queue_info *tc_queue;
uint8_t i;
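+ /* Tx queues are divided evenly among the enabled TCs. */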
+ hw->tx_qnum_per_tc = nb_queue / hw->num_tc;
for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
tc_queue = &hw->tc_queue[i];
if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
tc_queue->enable = true;
- tc_queue->tqp_offset = i * hw->alloc_rss_size;
- tc_queue->tqp_count = hw->alloc_rss_size;
+ tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
+ tc_queue->tqp_count = hw->tx_qnum_per_tc;
tc_queue->tc = i;
} else {
/* Set to default queue if TC is disable */
tc_queue->tc = 0;
}
}
+ hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;
}
static void
-hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t queue_num)
+hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
+ uint16_t nb_tx_q)
{
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
struct hns3_pf *pf = &hns->pf;
- uint16_t tqpnum_per_tc;
- uint16_t alloc_tqps;
-
- alloc_tqps = RTE_MIN(hw->tqps_num, queue_num);
- hw->num_tc = RTE_MIN(alloc_tqps, hw->dcb_info.num_tc);
- tqpnum_per_tc = RTE_MIN(hw->rss_size_max, alloc_tqps / hw->num_tc);
- if (hw->alloc_rss_size != tqpnum_per_tc) {
- PMD_INIT_LOG(INFO, "rss size changes from %d to %d",
- hw->alloc_rss_size, tqpnum_per_tc);
- hw->alloc_rss_size = tqpnum_per_tc;
- }
- hw->alloc_tqps = hw->num_tc * hw->alloc_rss_size;
+ hw->num_tc = hw->dcb_info.num_tc;
+ hns3_set_rss_size(hw, nb_rx_q);
+ hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
- hns3_tc_queue_mapping_cfg(hw);
-
- memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
+ if (!hns->is_vf)
+ memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
}
int
for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
- hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues);
+ hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
+ hw->data->nb_tx_queues);
}
-static void
+static int
hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
{
struct hns3_pf *pf = &hns->pf;
struct hns3_hw *hw = &hns->hw;
+ uint16_t nb_rx_q = hw->data->nb_rx_queues;
+ uint16_t nb_tx_q = hw->data->nb_tx_queues;
uint8_t bit_map = 0;
uint8_t i;
if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
hw->dcb_info.num_pg != 1)
- return;
+ return -EINVAL;
+
+ if (nb_rx_q < num_tc) {
+ hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+ nb_rx_q, num_tc);
+ return -EINVAL;
+ }
+
+ if (nb_tx_q < num_tc) {
+ hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+ nb_tx_q, num_tc);
+ return -EINVAL;
+ }
/* Currently not support uncontinuous tc */
hw->dcb_info.num_tc = num_tc;
bit_map = 1;
hw->dcb_info.num_tc = 1;
}
-
hw->hw_tc_map = bit_map;
-
hns3_dcb_info_cfg(hns);
+
+ return 0;
}
static int
hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
- hns3_dcb_info_update(hns, num_tc);
+ ret = hns3_dcb_info_update(hns, num_tc);
+ if (ret) {
+ hns3_err(hw, "dcb info update failed: %d", ret);
+ return ret;
+ }
+
ret = hns3_dcb_hw_configure(hns);
if (ret) {
- hns3_err(hw, "dcb sw configure fails: %d", ret);
+ hns3_err(hw, "dcb sw configure failed: %d", ret);
return ret;
}
}
hns3_err(hw, "dcb info init failed: %d", ret);
return ret;
}
- hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num);
+ hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,
+ hw->tqps_num);
}
/*
hns3_update_queue_map_configure(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- uint16_t queue_num = hw->data->nb_rx_queues;
+ uint16_t nb_rx_q = hw->data->nb_rx_queues;
+ uint16_t nb_tx_q = hw->data->nb_tx_queues;
int ret;
- hns3_dcb_update_tc_queue_mapping(hw, queue_num);
+ hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
ret = hns3_q_to_qs_map(hw);
if (ret) {
hns3_err(hw, "failed to map nq to qs! ret = %d", ret);
{
uint16_t i;
+ /* Note: fake Rx queues never enter this path. */
if (rxq->sw_ring) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf) {
{
uint16_t i;
+ /* Note: fake Tx queues never enter this path. */
if (txq->sw_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
if (txq->sw_ring[i].mbuf) {
rte_spinlock_unlock(&hns->hw.lock);
}
-void
-hns3_free_all_queues(struct rte_eth_dev *dev)
+static void
+hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
+{
+ struct hns3_rx_queue *rxq = queue;
+ struct hns3_adapter *hns;
+ struct hns3_hw *hw;
+ uint16_t idx;
+
+ if (rxq == NULL)
+ return;
+
+ hns = rxq->hns;
+ hw = &hns->hw;
+ idx = rxq->queue_id;
+ if (hw->fkq_data.rx_queues[idx]) {
+ hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+ hw->fkq_data.rx_queues[idx] = NULL;
+ }
+
+ /* free fake rx queue arrays */
+ if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {
+ hw->fkq_data.nb_fake_rx_queues = 0;
+ rte_free(hw->fkq_data.rx_queues);
+ hw->fkq_data.rx_queues = NULL;
+ }
+}
+
+static void
+hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)
{
+ struct hns3_tx_queue *txq = queue;
+ struct hns3_adapter *hns;
+ struct hns3_hw *hw;
+ uint16_t idx;
+
+ if (txq == NULL)
+ return;
+
+ hns = txq->hns;
+ hw = &hns->hw;
+ idx = txq->queue_id;
+ if (hw->fkq_data.tx_queues[idx]) {
+ hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+ hw->fkq_data.tx_queues[idx] = NULL;
+ }
+
+ /* free fake tx queue arrays */
+ if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {
+ hw->fkq_data.nb_fake_tx_queues = 0;
+ rte_free(hw->fkq_data.tx_queues);
+ hw->fkq_data.tx_queues = NULL;
+ }
+}
+
+static void
+hns3_free_rx_queues(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_fake_queue_data *fkq_data;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t nb_rx_q;
uint16_t i;
- if (dev->data->rx_queues)
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ nb_rx_q = hw->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_q; i++) {
+ if (dev->data->rx_queues[i]) {
hns3_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
+ }
+
+ /* Free fake Rx queues */
+ fkq_data = &hw->fkq_data;
+ for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {
+ if (fkq_data->rx_queues[i])
+ hns3_fake_rx_queue_release(fkq_data->rx_queues[i]);
+ }
+}
- if (dev->data->tx_queues)
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
+static void
+hns3_free_tx_queues(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_fake_queue_data *fkq_data;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t nb_tx_q;
+ uint16_t i;
+
+ nb_tx_q = hw->data->nb_tx_queues;
+ for (i = 0; i < nb_tx_q; i++) {
+ if (dev->data->tx_queues[i]) {
hns3_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
+ }
+
+ /* Free fake Tx queues */
+ fkq_data = &hw->fkq_data;
+ for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {
+ if (fkq_data->tx_queues[i])
+ hns3_fake_tx_queue_release(fkq_data->tx_queues[i]);
+ }
+}
+
+void
+hns3_free_all_queues(struct rte_eth_dev *dev)
+{
+ hns3_free_rx_queues(dev);
+ hns3_free_tx_queues(dev);
}
static int
static void
hns3_enable_all_queues(struct hns3_hw *hw, bool en)
{
+ uint16_t nb_rx_q = hw->data->nb_rx_queues;
+ uint16_t nb_tx_q = hw->data->nb_tx_queues;
struct hns3_rx_queue *rxq;
struct hns3_tx_queue *txq;
uint32_t rcb_reg;
int i;
- for (i = 0; i < hw->data->nb_rx_queues; i++) {
- rxq = hw->data->rx_queues[i];
- txq = hw->data->tx_queues[i];
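+ /* cfg_max_queues covers the real queues and the fake ones padding the smaller Rx/Tx side. */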
+ for (i = 0; i < hw->cfg_max_queues; i++) {
+ if (i < nb_rx_q)
+ rxq = hw->data->rx_queues[i];
+ else
+ rxq = hw->fkq_data.rx_queues[i - nb_rx_q];
+ if (i < nb_tx_q)
+ txq = hw->data->tx_queues[i];
+ else
+ txq = hw->fkq_data.tx_queues[i - nb_tx_q];
if (rxq == NULL || txq == NULL ||
(en && (rxq->rx_deferred_start || txq->tx_deferred_start)))
continue;
+
rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);
if (en)
rcb_reg |= BIT(HNS3_RING_EN_B);
hns3_reset_all_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- int ret;
- uint16_t i;
+ int ret, i;
- for (i = 0; i < hw->data->nb_rx_queues; i++) {
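+ /* Reset every configured queue, including the fake ones. */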
+ for (i = 0; i < hw->cfg_max_queues; i++) {
ret = hns3_reset_queue(hns, i);
if (ret) {
hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
PMD_INIT_FUNC_TRACE();
- rxq = hw->data->rx_queues[idx];
-
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];
ret = hns3_alloc_rx_queue_mbufs(hw, rxq);
if (ret) {
hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d",
- idx, ret);
+ idx, ret);
return ret;
}
}
static void
-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
struct hns3_hw *hw = &hns->hw;
- struct hns3_tx_queue *txq;
+ struct hns3_rx_queue *rxq;
+
+ rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];
+ rxq->next_to_use = 0;
+ rxq->next_to_clean = 0;
+ hns3_init_rx_queue_hw(rxq);
+}
+
+static void
+hns3_init_tx_queue(struct hns3_tx_queue *queue)
+{
+ struct hns3_tx_queue *txq = queue;
struct hns3_desc *desc;
int i;
- txq = hw->data->tx_queues[idx];
-
/* Clear tx bd */
desc = txq->tx_ring;
for (i = 0; i < txq->nb_tx_desc; i++) {
txq->next_to_use = 0;
txq->next_to_clean = 0;
- txq->tx_bd_ready = txq->nb_tx_desc;
+ txq->tx_bd_ready = txq->nb_tx_desc;
hns3_init_tx_queue_hw(txq);
}
+static void
+hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];
+ hns3_init_tx_queue(txq);
+}
+
+static void
+hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+
+ txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];
+ hns3_init_tx_queue(txq);
+}
+
static void
hns3_init_tx_ring_tc(struct hns3_adapter *hns)
{
for (j = 0; j < tc_queue->tqp_count; j++) {
num = tc_queue->tqp_offset + j;
- txq = hw->data->tx_queues[num];
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[num];
if (txq == NULL)
continue;
}
}
-int
-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+static int
+hns3_start_rx_queues(struct hns3_adapter *hns)
{
struct hns3_hw *hw = &hns->hw;
- struct rte_eth_dev_data *dev_data = hw->data;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
+ int i, j;
int ret;
- int i;
- int j;
/* Initialize RSS for queues */
ret = hns3_config_rss(hns);
return ret;
}
- if (reset_queue) {
- ret = hns3_reset_all_queues(hns);
- if (ret) {
- hns3_err(hw, "Failed to reset all queues %d", ret);
- return ret;
- }
- }
-
- /*
- * Hardware does not support where the number of rx and tx queues is
- * not equal in hip08. In .dev_configure callback function we will
- * check the two values, here we think that the number of rx and tx
- * queues is equal.
- */
for (i = 0; i < hw->data->nb_rx_queues; i++) {
- rxq = dev_data->rx_queues[i];
- txq = dev_data->tx_queues[i];
- if (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||
- txq->tx_deferred_start)
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
continue;
-
ret = hns3_dev_rx_queue_start(hns, i);
if (ret) {
hns3_err(hw, "Failed to start No.%d rx queue: %d", i,
ret);
goto out;
}
- hns3_dev_tx_queue_start(hns, i);
}
- hns3_init_tx_ring_tc(hns);
- hns3_enable_all_queues(hw, true);
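+ /* Fake Rx queues only need their hardware rings initialised; no mbufs are allocated. */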
+ for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {
+ rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+ hns3_fake_rx_queue_start(hns, i);
+ }
return 0;
out:
for (j = 0; j < i; j++) {
- rxq = dev_data->rx_queues[j];
+ rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];
hns3_rx_queue_release_mbufs(rxq);
}
return ret;
}
+static void
+hns3_start_tx_queues(struct hns3_adapter *hns)
+{
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)hw->data->tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_dev_tx_queue_start(hns, i);
+ }
+
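+ /* Fake Tx queues are started the same way: clear the BDs and initialise the hardware ring. */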
+ for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {
+ txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+ hns3_fake_tx_queue_start(hns, i);
+ }
+
+ hns3_init_tx_ring_tc(hns);
+}
+
+int
+hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
+{
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ if (reset_queue) {
+ ret = hns3_reset_all_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to reset all queues %d", ret);
+ return ret;
+ }
+ }
+
+ ret = hns3_start_rx_queues(hns);
+ if (ret) {
+ hns3_err(hw, "Failed to start rx queues: %d", ret);
+ return ret;
+ }
+
+ hns3_start_tx_queues(hns);
+ hns3_enable_all_queues(hw, true);
+
+ return 0;
+}
+
int
hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)
{
return 0;
}
+static void*
+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *rx_mz;
+ struct hns3_rx_queue *rxq;
+ unsigned int rx_desc;
+
+ rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate rx ring hardware descriptors. */
+ rxq->queue_id = q_info->idx;
+ rxq->nb_rx_desc = q_info->nb_desc;
+ rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);
+ rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ rx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (rx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ q_info->idx);
+ hns3_rx_queue_release(rxq);
+ return NULL;
+ }
+ rxq->mz = rx_mz;
+ rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
+ rxq->rx_ring_phys_addr = rx_mz->iova;
+
+ hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ rxq->rx_ring_phys_addr);
+
+ return rxq;
+}
+
+static int
+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_rx_queue *rxq;
+ uint16_t nb_rx_q;
+
+ if (hw->fkq_data.rx_queues[idx]) {
+ hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);
+ hw->fkq_data.rx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake RX queue";
+ q_info.ring_name = "rx_fake_ring";
+ rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
+ if (rxq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring, because upper applications do not use it. */
+ rxq->sw_ring = NULL;
+
+ rxq->hns = hns;
+ rxq->rx_deferred_start = false;
+ rxq->port_id = dev->data->port_id;
+ rxq->configured = true;
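+ /* Fake Rx queue registers follow the real Rx queues in the TQP register space. */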
+ nb_rx_q = dev->data->nb_rx_queues;
+ rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_rx_q + idx) * HNS3_TQP_REG_SIZE);
+ rxq->rx_buf_len = hw->rx_buf_len;
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.rx_queues[idx] = rxq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static void*
+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
+ struct hns3_queue_info *q_info)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ const struct rte_memzone *tx_mz;
+ struct hns3_tx_queue *txq;
+ struct hns3_desc *desc;
+ unsigned int tx_desc;
+ int i;
+
+ txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
+ RTE_CACHE_LINE_SIZE, q_info->socket_id);
+ if (txq == NULL) {
+ hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ q_info->idx);
+ return NULL;
+ }
+
+ /* Allocate tx ring hardware descriptors. */
+ txq->queue_id = q_info->idx;
+ txq->nb_tx_desc = q_info->nb_desc;
+ tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);
+ tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,
+ tx_desc, HNS3_RING_BASE_ALIGN,
+ q_info->socket_id);
+ if (tx_mz == NULL) {
+ hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ q_info->idx);
+ hns3_tx_queue_release(txq);
+ return NULL;
+ }
+ txq->mz = tx_mz;
+ txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
+ txq->tx_ring_phys_addr = tx_mz->iova;
+
+ hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ txq->tx_ring_phys_addr);
+
+ /* Clear tx bd */
+ desc = txq->tx_ring;
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ desc->tx.tp_fe_sc_vld_ra_ri = 0;
+ desc++;
+ }
+
+ return txq;
+}
+
+static int
+hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
+ struct hns3_tx_queue *txq;
+ uint16_t nb_tx_q;
+
+ if (hw->fkq_data.tx_queues[idx] != NULL) {
+ hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);
+ hw->fkq_data.tx_queues[idx] = NULL;
+ }
+
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 fake TX queue";
+ q_info.ring_name = "tx_fake_ring";
+ txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
+ if (txq == NULL) {
+ hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ return -ENOMEM;
+ }
+
+ /* No need to allocate sw_ring, because upper applications do not use it. */
+ txq->sw_ring = NULL;
+
+ txq->hns = hns;
+ txq->tx_deferred_start = false;
+ txq->port_id = dev->data->port_id;
+ txq->configured = true;
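+ /* Fake Tx queue registers follow the real Tx queues in the TQP register space. */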
+ nb_tx_q = dev->data->nb_tx_queues;
+ txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
+ (nb_tx_q + idx) * HNS3_TQP_REG_SIZE);
+
+ rte_spinlock_lock(&hw->lock);
+ hw->fkq_data.tx_queues[idx] = txq;
+ rte_spinlock_unlock(&hw->lock);
+
+ return 0;
+}
+
+static int
+hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;
+ void **rxq;
+ uint16_t i;
+
+ if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
+ /* first time configuration */
+
+ uint32_t size;
+ size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
+ hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
+ RTE_CACHE_LINE_SIZE);
+ if (hw->fkq_data.rx_queues == NULL) {
+ hw->fkq_data.nb_fake_rx_queues = 0;
+ return -ENOMEM;
+ }
+ } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
+ /* re-configure */
+
+ rxq = hw->fkq_data.rx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_rx_queue_release(rxq[i]);
+
+ rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+ memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);
+ }
+
+ hw->fkq_data.rx_queues = rxq;
+ } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
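+ /* release all fake Rx queues */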
+ rxq = hw->fkq_data.rx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_rx_queue_release(rxq[i]);
+
+ rte_free(hw->fkq_data.rx_queues);
+ hw->fkq_data.rx_queues = NULL;
+ }
+
+ hw->fkq_data.nb_fake_rx_queues = nb_queues;
+
+ return 0;
+}
+
+static int
+hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;
+ void **txq;
+ uint16_t i;
+
+ if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
+ /* first time configuration */
+
+ uint32_t size;
+ size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
+ hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
+ RTE_CACHE_LINE_SIZE);
+ if (hw->fkq_data.tx_queues == NULL) {
+ hw->fkq_data.nb_fake_tx_queues = 0;
+ return -ENOMEM;
+ }
+ } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
+ /* re-configure */
+
+ txq = hw->fkq_data.tx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_tx_queue_release(txq[i]);
+ txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ if (nb_queues > old_nb_queues) {
+ uint16_t new_qs = nb_queues - old_nb_queues;
+ memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);
+ }
+
+ hw->fkq_data.tx_queues = txq;
+ } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
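+ /* release all fake Tx queues */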
+ txq = hw->fkq_data.tx_queues;
+ for (i = nb_queues; i < old_nb_queues; i++)
+ hns3_dev_tx_queue_release(txq[i]);
+
+ rte_free(hw->fkq_data.tx_queues);
+ hw->fkq_data.tx_queues = NULL;
+ }
+ hw->fkq_data.nb_fake_tx_queues = nb_queues;
+
+ return 0;
+}
+
+int
+hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+ uint16_t nb_tx_q)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t rx_need_add_nb_q;
+ uint16_t tx_need_add_nb_q;
+ uint16_t port_id;
+ uint16_t q;
+ int ret;
+
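+ /* hip08 requires equal Rx and Tx queue numbers, so fake queues pad the smaller side up to cfg_max_queues. */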
+ /* Setup new number of fake RX/TX queues and reconfigure device. */
+ hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
+ rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;
+ tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;
+ ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);
+ if (ret) {
+ hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+ goto cfg_fake_rx_q_fail;
+ }
+
+ ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
+ if (ret) {
+ hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+ goto cfg_fake_tx_q_fail;
+ }
+
+ /* Allocate and set up fake RX queue per Ethernet port. */
+ port_id = hw->data->port_id;
+ for (q = 0; q < rx_need_add_nb_q; q++) {
+ ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+ rte_eth_dev_socket_id(port_id));
+ if (ret)
+ goto setup_fake_rx_q_fail;
+ }
+
+ /* Allocate and set up fake TX queue per Ethernet port. */
+ for (q = 0; q < tx_need_add_nb_q; q++) {
+ ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,
+ rte_eth_dev_socket_id(port_id));
+ if (ret)
+ goto setup_fake_tx_q_fail;
+ }
+
+ return 0;
+
+setup_fake_tx_q_fail:
+setup_fake_rx_q_fail:
+ (void)hns3_fake_tx_queue_config(hw, 0);
+cfg_fake_tx_q_fail:
+ (void)hns3_fake_rx_queue_config(hw, 0);
+cfg_fake_rx_q_fail:
+ hw->cfg_max_queues = 0;
+
+ return ret;
+}
+
void
hns3_dev_release_mbufs(struct hns3_adapter *hns)
{
struct rte_mempool *mp)
{
struct hns3_adapter *hns = dev->data->dev_private;
- const struct rte_memzone *rx_mz;
struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
struct hns3_rx_queue *rxq;
- unsigned int desc_size = sizeof(struct hns3_desc);
- unsigned int rx_desc;
int rx_entry_len;
if (dev->data->dev_started) {
dev->data->rx_queues[idx] = NULL;
}
- rxq = rte_zmalloc_socket("hns3 RX queue", sizeof(struct hns3_rx_queue),
- RTE_CACHE_LINE_SIZE, socket_id);
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 RX queue";
+ q_info.ring_name = "rx_ring";
+ rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
if (rxq == NULL) {
- hns3_err(hw, "Failed to allocate memory for rx queue!");
+ hns3_err(hw,
+ "Failed to alloc mem and reserve DMA mem for rx ring!");
return -ENOMEM;
}
rxq->hns = hns;
rxq->mb_pool = mp;
- rxq->nb_rx_desc = nb_desc;
- rxq->queue_id = idx;
if (conf->rx_free_thresh <= 0)
rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;
else
return -ENOMEM;
}
- /* Allocate rx ring hardware descriptors. */
- rx_desc = rxq->nb_rx_desc * desc_size;
- rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", idx, rx_desc,
- HNS3_RING_BASE_ALIGN, socket_id);
- if (rx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
- idx);
- hns3_rx_queue_release(rxq);
- return -ENOMEM;
- }
- rxq->mz = rx_mz;
- rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
- rxq->rx_ring_phys_addr = rx_mz->iova;
-
- hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, idx,
- rxq->rx_ring_phys_addr);
-
rxq->next_to_use = 0;
rxq->next_to_clean = 0;
rxq->nb_rx_hold = 0;
unsigned int socket_id, const struct rte_eth_txconf *conf)
{
struct hns3_adapter *hns = dev->data->dev_private;
- const struct rte_memzone *tx_mz;
struct hns3_hw *hw = &hns->hw;
+ struct hns3_queue_info q_info;
struct hns3_tx_queue *txq;
- struct hns3_desc *desc;
- unsigned int desc_size = sizeof(struct hns3_desc);
- unsigned int tx_desc;
int tx_entry_len;
- int i;
if (dev->data->dev_started) {
hns3_err(hw, "tx_queue_setup after dev_start no supported");
dev->data->tx_queues[idx] = NULL;
}
- txq = rte_zmalloc_socket("hns3 TX queue", sizeof(struct hns3_tx_queue),
- RTE_CACHE_LINE_SIZE, socket_id);
+ q_info.idx = idx;
+ q_info.socket_id = socket_id;
+ q_info.nb_desc = nb_desc;
+ q_info.type = "hns3 TX queue";
+ q_info.ring_name = "tx_ring";
+ txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
if (txq == NULL) {
- hns3_err(hw, "Failed to allocate memory for tx queue!");
+ hns3_err(hw,
+ "Failed to alloc mem and reserve DMA mem for tx ring!");
return -ENOMEM;
}
- txq->nb_tx_desc = nb_desc;
- txq->queue_id = idx;
txq->tx_deferred_start = conf->tx_deferred_start;
-
tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;
txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len,
RTE_CACHE_LINE_SIZE, socket_id);
return -ENOMEM;
}
- /* Allocate tx ring hardware descriptors. */
- tx_desc = txq->nb_tx_desc * desc_size;
- tx_mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, tx_desc,
- HNS3_RING_BASE_ALIGN, socket_id);
- if (tx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
- idx);
- hns3_tx_queue_release(txq);
- return -ENOMEM;
- }
- txq->mz = tx_mz;
- txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
- txq->tx_ring_phys_addr = tx_mz->iova;
-
- hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, idx,
- txq->tx_ring_phys_addr);
-
- /* Clear tx bd */
- desc = txq->tx_ring;
- for (i = 0; i < txq->nb_tx_desc; i++) {
- desc->tx.tp_fe_sc_vld_ra_ri = 0;
- desc++;
- }
-
txq->hns = hns;
txq->next_to_use = 0;
txq->next_to_clean = 0;
- txq->tx_bd_ready = txq->nb_tx_desc;
+ txq->tx_bd_ready = txq->nb_tx_desc;
txq->port_id = dev->data->port_id;
txq->configured = true;
txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +