	txq->q_set = TRUE;
	dev->data->tx_queues[queue_idx] = txq;
	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
+	ice_set_tx_function_flag(dev, txq);
	return 0;
}
}
}
+void __attribute__((cold))
+ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
+{
+	struct ice_adapter *ad =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Use a simple Tx queue if possible (only fast free is allowed) */
+	ad->tx_simple_allowed =
+		(txq->offloads ==
+		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
+
+	if (ad->tx_simple_allowed)
+		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+			     txq->queue_id);
+	else
+		PMD_INIT_LOG(DEBUG,
+			     "Simple Tx can NOT be enabled on Tx queue %u.",
+			     txq->queue_id);
+}
+
/*********************************************************************
*
* TX prep functions
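For context: the new helper only records the decision in ad->tx_simple_allowed; the flag is consumed later when ice_set_tx_function() (declared in ice_rxtx.h below) picks the Tx burst routine. A minimal sketch of that selection, assuming a simple burst path named ice_xmit_pkts_simple next to the full-featured ice_xmit_pkts/ice_prep_pkts pair (names outside this hunk are assumptions, not part of the patch):

/* Illustrative sketch only -- not part of the patch. Assumes the usual
 * ice PMD definitions from ice_rxtx.h; ice_xmit_pkts_simple is an assumed
 * name for the offload-free burst routine.
 */
void __attribute__((cold))
ice_set_tx_function(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	if (ad->tx_simple_allowed) {
		/* Only mbuf fast free and a large rs_thresh: skip Tx prep. */
		dev->tx_pkt_burst = ice_xmit_pkts_simple;
		dev->tx_pkt_prepare = NULL;
	} else {
		/* Any other offload: full descriptor path with Tx prep. */
		dev->tx_pkt_burst = ice_xmit_pkts;
		dev->tx_pkt_prepare = ice_prep_pkts;
	}
}
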
void ice_set_rx_function(struct rte_eth_dev *dev);
uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts);
+void ice_set_tx_function_flag(struct rte_eth_dev *dev,
+			      struct ice_tx_queue *txq);
void ice_set_tx_function(struct rte_eth_dev *dev);
uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,