#include "ice_rxtx.h"
-#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
-
#define ICE_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
/* Default: use 32-byte descriptors; VLAN tag is extracted to L2TAG2 (1st) */
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 0;
+ rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
if (err) {
/* Free all mbufs for descriptors in rx queue */
static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
uint16_t i;
#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
}
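+/* Release Rx mbufs via the callback registered for this queue */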
+static void
+ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+{
+ rxq->rx_rel_mbufs(rxq);
+}
+
/* turn on or off rx queue
* @q_idx: queue index in pf scope
* @on: turn on or off the queue
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
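+	/* Reset the rearm state used by the vector Rx path */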
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
}
int
/* Free all mbufs for descriptors in tx queue */
static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
{
uint16_t i;
}
}
}
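+/* Release Tx mbufs via the callback registered for this queue */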
+static void
+ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+{
+ txq->tx_rel_mbufs(txq);
+}
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
ice_reset_rx_queue(rxq);
rxq->q_set = TRUE;
dev->data->rx_queues[queue_idx] = rxq;
+ rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
ice_reset_tx_queue(txq);
txq->q_set = TRUE;
dev->data->tx_queues[queue_idx] = txq;
+ txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
return 0;
}
#endif
dev->rx_pkt_burst == ice_recv_scattered_pkts)
return ptypes;
+
+#ifdef RTE_ARCH_X86
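+	/* The vector Rx paths report the same packet types */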
+ if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec)
+ return ptypes;
+#endif
+
return NULL;
}
+int
+ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ice_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
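+	/* Held descriptors (not yet returned to hardware) are unavailable */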
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
+ ICE_RXD_QW1_STATUS_S);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ice_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
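+	/* wrap back into the ring; the round-up may cross the ring end twice */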
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
+ expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
+ ICE_TXD_QW1_DTYPE_S);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
ice_clear_queues(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+ struct ice_rx_queue *rxq;
+ int i;
+
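+	/* Use the vector Rx path if the device and queues support it */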
+ if (!ice_rx_vec_dev_check(dev)) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ (void)ice_rxq_vec_setup(rxq);
+ }
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG,
+ "Using Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ice_recv_pkts_vec;
+ }
+
+ return;
+ }
+#endif
if (dev->data->scattered_rx) {
/* Set the non-LRO scattered function */