/* Check if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
- i40e_xmit_cleanup(txq);
+ (void)i40e_xmit_cleanup(txq);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
td_cmd = 0;
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
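+ /* Ensure descriptor writes are visible to the device before bumping
+ * the tail register with the relaxed (non-ordered) write below.
+ */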
+ rte_cio_wmb();
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
PMD_INIT_FUNC_TRACE();
rxq = dev->data->rx_queues[rx_queue_id];
+ if (!rxq || !rxq->q_set) {
+ PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
+ if (rxq->rx_deferred_start)
+ PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start",
+ rx_queue_id);
err = i40e_alloc_rx_queue_mbufs(rxq);
if (err) {
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rxq = dev->data->rx_queues[rx_queue_id];
+ if (!rxq || !rxq->q_set) {
+ PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
+ rx_queue_id);
+ return -EINVAL;
+ }
/*
* rx_queue_id is queue id application refers to, while
PMD_INIT_FUNC_TRACE();
txq = dev->data->tx_queues[tx_queue_id];
+ if (!txq || !txq->q_set) {
+ PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
+ tx_queue_id);
+ return -EINVAL;
+ }
+
+ if (txq->tx_deferred_start)
+ PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start",
+ tx_queue_id);
/*
* tx_queue_id is queue id application refers to, while
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
txq = dev->data->tx_queues[tx_queue_id];
+ if (!txq || !txq->q_set) {
+ PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
+ tx_queue_id);
+ return -EINVAL;
+ }
/*
* tx_queue_id is queue id application refers to, while
}
}
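+/* Reclaim transmitted mbufs on the full-featured Tx path: walk sw_ring
+ * from the entry after tx_tail, triggering descriptor cleanup as needed,
+ * until free_cnt packets are freed or nothing more can be reclaimed.
+ */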
+static int
+i40e_tx_done_cleanup_full(struct i40e_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ struct i40e_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start free mbuf from the next of tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
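+ /* No descriptors reported free yet; force one cleanup pass and
+ * give up if the hardware has completed nothing.
+ */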
+ if (txq->nb_tx_free == 0 && i40e_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
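+ /* Stop if fewer than tx_rs_thresh descriptors remain in use (no
+ * further hardware cleanup is possible) or the walk wrapped around.
+ */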
+ if (txq->tx_rs_thresh > txq->nb_tx_desc -
+ txq->nb_tx_free || tx_id == tx_last)
+ break;
+
+ if (pkt_cnt < free_cnt) {
+ if (i40e_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
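+/* Reclaim transmitted mbufs on the simple Tx path, where completed
+ * buffers are freed in bursts of tx_rs_thresh by i40e_tx_free_bufs().
+ */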
+static int
+i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
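+ /* Buffers are freed in whole bursts of tx_rs_thresh, so round the
+ * request down to a multiple of the threshold.
+ */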
+ cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+ break;
+
+ n = i40e_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
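+/* On-demand mbuf cleanup is not implemented for the vector Tx path. */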
+static int
+i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq __rte_unused,
+ uint32_t free_cnt __rte_unused)
+{
+ return -ENOTSUP;
+}
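+
+/* Dispatch to the handler matching the active Tx path. Applications
+ * reach this through the generic rte_eth_tx_done_cleanup(); a minimal
+ * usage sketch, assuming port_id/queue_id name a started i40e queue:
+ *
+ *	int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
+ *	if (freed < 0)
+ *		printf("Tx cleanup not supported or failed\n");
+ */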
+int
+i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+ struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed)
+ return i40e_tx_done_cleanup_vec(q, free_cnt);
+ else
+ return i40e_tx_done_cleanup_simple(q, free_cnt);
+ } else {
+ return i40e_tx_done_cleanup_full(q, free_cnt);
+ }
+}
+
void
i40e_reset_tx_queue(struct i40e_tx_queue *txq)
{
continue;
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
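+ /* Also free the DMA memzone that backs the Rx descriptor ring. */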
+ rte_eth_dma_zone_free(dev, "rx_ring", i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
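+ /* Likewise release the Tx descriptor ring memzone. */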
+ rte_eth_dma_zone_free(dev, "tx_ring", i);
}
}
-#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
-#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
-
enum i40e_status_code
i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
{
struct i40e_tx_queue *txq;
const struct rte_memzone *tz = NULL;
- uint32_t ring_size;
struct rte_eth_dev *dev;
+ uint32_t ring_size;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+
/*
* No need to allocate a software ring or reset for the FDIR
* program queue; just mark the queue as configured.
*/
txq->q_set = TRUE;
pf->fdir.txq = txq;
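+ /* All FDIR programming packet buffers start out available. */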
+ pf->fdir.txq_available_buf_count = I40E_FDIR_PRG_PKT_CNT;
return I40E_SUCCESS;
}
i40e_recv_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_rx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
struct i40e_adapter *ad =
return i40e_xmit_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_pctype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =