}
static void
-otx2_nix_rx_queue_release(void *rx_queue)
+otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct otx2_eth_rxq *rxq = rx_queue;
+ /* New ethdev release signature: resolve the queue from device data by
+  * id instead of receiving an opaque queue pointer.
+  * NOTE(review): assumes qid < dev->data->nb_rx_queues — ethdev
+  * validates this before invoking the dev_ops callback; confirm for
+  * the direct internal callers in this patch.
+  */
+ struct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];
 if (!rxq)
 return;
 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
 nix_cq_rq_uninit(rxq->eth_dev, rxq);
- rte_free(rx_queue);
+ rte_free(rxq);
+ /* Clear the slot so a repeated release or a later re-setup of this
+  * queue id observes an already-freed queue (guards double-free).
+  */
+ dev->data->rx_queues[qid] = NULL;
}
static int
/* Free memory prior to re-allocation if needed */
if (eth_dev->data->rx_queues[rq] != NULL) {
otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
- otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+ otx2_nix_rx_queue_release(eth_dev, rq);
rte_eth_dma_zone_free(eth_dev, "cq", rq);
- eth_dev->data->rx_queues[rq] = NULL;
}
offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
rxq->tstamp = &dev->tstamp;
+ eth_dev->data->rx_queues[rq] = rxq;
+
/* Alloc completion queue */
rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
if (rc) {
otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
rq, mp->name, qsize, nb_desc, rxq->qlen);
- eth_dev->data->rx_queues[rq] = rxq;
eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
/* Calculating delta and freq mult between PTP HI clock and tsc.
return 0;
free_rxq:
- otx2_nix_rx_queue_release(rxq);
+ otx2_nix_rx_queue_release(eth_dev, rq);
fail:
return rc;
}
mbp_priv = rte_mempool_get_priv(rxq->pool);
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
0, 0, dev->node,
- MEMPOOL_F_NO_SPREAD);
+ RTE_MEMPOOL_F_NO_SPREAD);
txq->nb_sqb_bufs = nb_sqb_bufs;
txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
txq->nb_sqb_bufs_adj = nb_sqb_bufs -
goto fail;
}
- tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+ tmp = rte_mempool_calc_obj_size(blk_sz, RTE_MEMPOOL_F_NO_SPREAD, &sz);
if (dev->sqb_size != sz.elt_size) {
otx2_err("sqe pool block size is not expected %d != %d",
dev->sqb_size, tmp);
}
static void
-otx2_nix_tx_queue_release(void *_txq)
+otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
- struct otx2_eth_txq *txq = _txq;
- struct rte_eth_dev *eth_dev;
+ /* New ethdev release signature: the device is now a parameter, so the
+  * old back-reference lookup (eth_dev = txq->dev->eth_dev) is gone and
+  * the queue is fetched from device data by id.
+  */
+ struct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];
 if (!txq)
 return;
- eth_dev = txq->dev->eth_dev;
-
 otx2_nix_dbg("Releasing txq %u", txq->sq);
 /* Flush and disable tm */
 }
 otx2_nix_sq_flush_post(txq);
 rte_free(txq);
+ /* NULL the slot so double release / subsequent re-setup is safe. */
+ eth_dev->data->tx_queues[qid] = NULL;
}
/* Free memory prior to re-allocation if needed. */
if (eth_dev->data->tx_queues[sq] != NULL) {
otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
- otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
- eth_dev->data->tx_queues[sq] = NULL;
+ otx2_nix_tx_queue_release(eth_dev, sq);
}
/* Find the expected offloads for this queue */
txq->sqb_pool = NULL;
txq->offloads = offloads;
dev->tx_offloads |= offloads;
+ eth_dev->data->tx_queues[sq] = txq;
/*
* Allocate memory for flow control updates from HW.
" lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
- eth_dev->data->tx_queues[sq] = txq;
eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
free_txq:
- otx2_nix_tx_queue_release(txq);
+ otx2_nix_tx_queue_release(eth_dev, sq);
fail:
return rc;
}
}
memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
tx_qconf[i].valid = true;
- otx2_nix_tx_queue_release(txq[i]);
- eth_dev->data->tx_queues[i] = NULL;
+ otx2_nix_tx_queue_release(eth_dev, i);
}
rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
}
memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
rx_qconf[i].valid = true;
- otx2_nix_rx_queue_release(rxq[i]);
- eth_dev->data->rx_queues[i] = NULL;
+ otx2_nix_rx_queue_release(eth_dev, i);
}
dev->tx_qconf = tx_qconf;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
- struct otx2_eth_txq **txq;
- struct otx2_eth_rxq **rxq;
int rc, i, nb_rxq, nb_txq;
nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
&tx_qconf[i].conf.tx);
if (rc) {
otx2_err("Failed to setup tx queue rc=%d", rc);
- txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
for (i -= 1; i >= 0; i--)
- otx2_nix_tx_queue_release(txq[i]);
+ otx2_nix_tx_queue_release(eth_dev, i);
goto fail;
}
}
rx_qconf[i].mempool);
if (rc) {
otx2_err("Failed to setup rx queue rc=%d", rc);
- rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
for (i -= 1; i >= 0; i--)
- otx2_nix_rx_queue_release(rxq[i]);
+ otx2_nix_rx_queue_release(eth_dev, i);
goto release_tx_queues;
}
}
return 0;
release_tx_queues:
- txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- otx2_nix_tx_queue_release(txq[i]);
+ otx2_nix_tx_queue_release(eth_dev, i);
fail:
if (tx_qconf)
free(tx_qconf);
int rc, max_entries;
eth_dev->dev_ops = &otx2_eth_dev_ops;
- eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
dev->ops = NULL;
/* Free up SQs */
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
- eth_dev->data->tx_queues[i] = NULL;
- }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_release(eth_dev, i);
eth_dev->data->nb_tx_queues = 0;
/* Free up RQ's and CQ's */
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
- eth_dev->data->rx_queues[i] = NULL;
- }
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ otx2_nix_rx_queue_release(eth_dev, i);
eth_dev->data->nb_rx_queues = 0;
/* Free tm resources */