#include "ice_rxtx.h"
-#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
-
#define ICE_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
- if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
+ if (rxq->max_pkt_len > buf_size)
dev->data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
/* Free all mbufs for descriptors in rx queue */
static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
uint16_t i;
#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
}
+/* Release all mbufs held by an Rx queue.
+ *
+ * Thin dispatcher: the actual work is done through the per-queue
+ * rx_rel_mbufs callback (the scalar path installs
+ * _ice_rx_queue_release_mbufs at queue setup; presumably the vector
+ * path installs its own variant — confirm against ice_rxq_vec_setup).
+ */
+static void
+ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+{
+	rxq->rx_rel_mbufs(rxq);
+}
+
/* turn on or off rx queue
* @q_idx: queue index in pf scope
* @on: turn on or off the queue
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
}
int
/* Init the Tx tail register*/
ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
- err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
- sizeof(txq_elem), NULL);
+	/* FIXME: the traffic class (TC) is hard-coded to 0 here */
+ err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
+ &txq_elem, sizeof(txq_elem), NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add lan txq");
return -EIO;
/* Free all mbufs for descriptors in tx queue */
static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
{
uint16_t i;
}
}
}
+/* Release all mbufs held by a Tx queue.
+ *
+ * Thin dispatcher: the actual work is done through the per-queue
+ * tx_rel_mbufs callback (the scalar path installs
+ * _ice_tx_queue_release_mbufs at queue setup; presumably the vector
+ * path installs its own variant — confirm against ice_txq_vec_setup).
+ */
+static void
+ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+{
+	txq->tx_rel_mbufs(txq);
+}
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
{
struct ice_tx_queue *txq;
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
enum ice_status status;
uint16_t q_ids[1];
uint32_t q_teids[1];
+ uint16_t q_handle = tx_queue_id;
if (tx_queue_id >= dev->data->nb_tx_queues) {
PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
q_ids[0] = txq->reg_idx;
q_teids[0] = txq->q_teid;
- status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
- ICE_NO_RESET, 0, NULL);
+	/* FIXME: the traffic class (TC) is hard-coded to 0 here */
+ status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
+ q_ids, q_teids, ICE_NO_RESET, 0, NULL);
if (status != ICE_SUCCESS) {
PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
return -EINVAL;
ice_reset_rx_queue(rxq);
rxq->q_set = TRUE;
dev->data->rx_queues[queue_idx] = rxq;
+ rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
ice_reset_tx_queue(txq);
txq->q_set = TRUE;
dev->data->tx_queues[queue_idx] = txq;
+ txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
return 0;
}
#endif
dev->rx_pkt_burst == ice_recv_scattered_pkts)
return ptypes;
+
+#ifdef RTE_ARCH_X86
+ if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
+ return ptypes;
+#endif
+
return NULL;
}
PMD_INIT_FUNC_TRACE();
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+ struct ice_rx_queue *rxq;
+ int i;
+ bool use_avx2 = false;
+
+ if (!ice_rx_vec_dev_check(dev)) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ (void)ice_rxq_vec_setup(rxq);
+ }
+
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
+
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG,
+ "Using %sVector Scattered Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->rx_pkt_burst = use_avx2 ?
+ ice_recv_scattered_pkts_vec_avx2 :
+ ice_recv_scattered_pkts_vec;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->rx_pkt_burst = use_avx2 ?
+ ice_recv_pkts_vec_avx2 :
+ ice_recv_pkts_vec;
+ }
+
+ return;
+ }
+#endif
if (dev->data->scattered_rx) {
/* Set the non-LRO scattered function */
{
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+ struct ice_tx_queue *txq;
+ int i;
+ bool use_avx2 = false;
+
+ if (!ice_tx_vec_dev_check(dev)) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ (void)ice_txq_vec_setup(txq);
+ }
+
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
+
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = use_avx2 ?
+ ice_xmit_pkts_vec_avx2 :
+ ice_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+
+ return;
+ }
+#endif
if (ad->tx_simple_allowed) {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");