net/ice: support Rx scatter SSE vector
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index f7637d2..748954f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -7,8 +7,6 @@
 
 #include "ice_rxtx.h"
 
-#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP
-
 #define ICE_TX_CKSUM_OFFLOAD_MASK (             \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
@@ -94,6 +92,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/
        rx_ctx.l2tsel = 1;
        rx_ctx.showiv = 0;
+       rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
 
        err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
        if (err) {
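
The new crcstrip bit mirrors the queue's crc_len: hardware strips the Ethernet FCS unless the application requested DEV_RX_OFFLOAD_KEEP_CRC at queue setup. A minimal sketch of how crc_len is typically derived (the helper name is hypothetical; the real logic lives in ice_rx_queue_setup):

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    /* Hypothetical helper: derive the per-queue CRC length from the
     * Rx offload flags; rx_ctx.crcstrip above is then just
     * (crc_len == 0). */
    static inline uint32_t
    rxq_crc_len(uint64_t rx_offloads)
    {
        /* Keep the 4-byte FCS in the mbuf only on explicit request. */
        return (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ? ETHER_CRC_LEN : 0;
    }
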
@@ -164,7 +163,7 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 
 /* Free all mbufs for descriptors in rx queue */
 static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
 {
        uint16_t i;
 
@@ -192,6 +191,12 @@ ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
 #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
 }
 
+static void
+ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+{
+       rxq->rx_rel_mbufs(rxq);
+}
+
 /* turn on or off rx queue
  * @q_idx: queue index in pf scope
  * @on: turn on or off the queue
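
Renaming the scalar release routine to _ice_rx_queue_release_mbufs and routing all callers through a per-queue rx_rel_mbufs pointer lets the vector path plug in its own mbuf-release logic (which must skip ring slots that currently hold no mbuf) without touching the generic teardown code; the Tx side below receives the identical treatment. A sketch of the pattern, with illustrative names standing in for the driver's symbols:

    /* Sketch of the release-callback indirection; the struct and both
     * release routines are stand-ins, not the driver's real types. */
    struct rxq;
    typedef void (*rel_mbufs_t)(struct rxq *q);

    struct rxq {
        rel_mbufs_t rx_rel_mbufs;   /* installed once at queue setup */
    };

    static void scalar_release(struct rxq *q) { /* free every sw_ring slot */ }
    static void vector_release(struct rxq *q) { /* skip unarmed slots */ }

    static void
    rxq_vec_setup(struct rxq *q)    /* analogous to ice_rxq_vec_setup() */
    {
        q->rx_rel_mbufs = vector_release;   /* override the default */
    }
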
@@ -318,6 +323,9 @@ ice_reset_rx_queue(struct ice_rx_queue *rxq)
        rxq->nb_rx_hold = 0;
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
+
+       rxq->rxrearm_start = 0;
+       rxq->rxrearm_nb = 0;
 }
 
 int
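
rxrearm_start and rxrearm_nb are bookkeeping for the vector Rx path: rxrearm_nb counts ring slots whose mbufs have been handed to the application but not yet replaced, and rxrearm_start marks where the next bulk rearm begins. Zeroing them here keeps a queue restart coherent. A simplified illustration of how a vector receive loop consumes them (the constant and helper are assumptions, not the driver's code):

    #include <stdint.h>

    #define REARM_THRESH 64     /* assumed bulk-rearm size */

    struct rxq { uint16_t rxrearm_start, rxrearm_nb, nb_rx_desc; };

    static void
    rearm_if_needed(struct rxq *q)  /* hypothetical helper */
    {
        if (q->rxrearm_nb < REARM_THRESH)
            return;     /* not enough consumed slots to refill yet */
        /* bulk-allocate REARM_THRESH mbufs into sw_ring starting at
         * rxrearm_start, rewrite those descriptors, then advance: */
        q->rxrearm_start += REARM_THRESH;
        if (q->rxrearm_start >= q->nb_rx_desc)
            q->rxrearm_start = 0;
        q->rxrearm_nb -= REARM_THRESH;
    }
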
@@ -467,7 +475,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 /* Free all mbufs for descriptors in tx queue */
 static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
 {
        uint16_t i;
 
@@ -483,6 +491,12 @@ ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
                }
        }
 }
+
+static void
+ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+{
+       txq->tx_rel_mbufs(txq);
+}
 
 static void
 ice_reset_tx_queue(struct ice_tx_queue *txq)
@@ -668,6 +681,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
        ice_reset_rx_queue(rxq);
        rxq->q_set = TRUE;
        dev->data->rx_queues[queue_idx] = rxq;
+       rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
 
        use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
 
@@ -865,6 +879,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
        ice_reset_tx_queue(txq);
        txq->q_set = TRUE;
        dev->data->tx_queues[queue_idx] = txq;
+       txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
 
        return 0;
 }
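
Both setup paths install the scalar release routines as the per-queue default; if ice_set_rx_function (below) selects the vector path, ice_rxq_vec_setup is then expected to overwrite rx_rel_mbufs with its vector-aware variant, as sketched earlier.
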
@@ -1487,9 +1502,74 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 #endif
            dev->rx_pkt_burst == ice_recv_scattered_pkts)
                return ptypes;
+
+#ifdef RTE_ARCH_X86
+       if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
+           dev->rx_pkt_burst == ice_recv_scattered_pkts_vec)
+               return ptypes;
+#endif
+
        return NULL;
 }
 
+int
+ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct ice_rx_queue *rxq = rx_queue;
+       volatile uint64_t *status;
+       uint64_t mask;
+       uint32_t desc;
+
+       if (unlikely(offset >= rxq->nb_rx_desc))
+               return -EINVAL;
+
+       if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       desc = rxq->rx_tail + offset;
+       if (desc >= rxq->nb_rx_desc)
+               desc -= rxq->nb_rx_desc;
+
+       status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+       mask = rte_cpu_to_le_64((1ULL << ICE_RX_DESC_STATUS_DD_S) <<
+                               ICE_RXD_QW1_STATUS_S);
+       if (*status & mask)
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct ice_tx_queue *txq = tx_queue;
+       volatile uint64_t *status;
+       uint64_t mask, expect;
+       uint32_t desc;
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
+       desc = txq->tx_tail + offset;
+       /* go to next desc that has the RS bit */
+       desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+               txq->tx_rs_thresh;
+       if (desc >= txq->nb_tx_desc) {
+               desc -= txq->nb_tx_desc;
+               if (desc >= txq->nb_tx_desc)
+                       desc -= txq->nb_tx_desc;
+       }
+
+       status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+       mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
+       expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
+                                 ICE_TXD_QW1_DTYPE_S);
+       if ((*status & mask) == expect)
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
 void
 ice_clear_queues(struct rte_eth_dev *dev)
 {
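
The two new callbacks back the generic rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status() ethdev calls. On Rx, the offset is taken from the software tail, and the nb_rx_hold descriptors not yet returned to hardware are reported as unavailable. On Tx, only descriptors carrying the RS bit are written back as DONE, so the offset is first rounded up to the next tx_rs_thresh boundary; with tx_rs_thresh = 32 and tx_tail + offset = 70, for example, the check lands on descriptor 96. A usage sketch from the application side (port and queue numbers are the caller's; the helper is illustrative):

    #include <rte_ethdev.h>

    /* Example: ask whether the Rx ring is at least half full, using the
     * generic API these driver callbacks implement. */
    static int
    rx_ring_half_full(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
    {
        return rte_eth_rx_descriptor_status(port_id, queue_id,
                                            nb_desc / 2) ==
               RTE_ETH_RX_DESC_DONE;
    }
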
@@ -2160,6 +2240,29 @@ ice_set_rx_function(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+       struct ice_rx_queue *rxq;
+       int i;
+
+       if (!ice_rx_vec_dev_check(dev)) {
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       (void)ice_rxq_vec_setup(rxq);
+               }
+               if (dev->data->scattered_rx) {
+                       PMD_DRV_LOG(DEBUG,
+                                   "Using Vector Scattered Rx (port %d).",
+                                   dev->data->port_id);
+                       dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Using Vector Rx (port %d).",
+                                   dev->data->port_id);
+                       dev->rx_pkt_burst = ice_recv_pkts_vec;
+               }
+
+               return;
+       }
+#endif
 
        if (dev->data->scattered_rx) {
                /* Set the non-LRO scattered function */
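
The vector burst functions are chosen only when ice_rx_vec_dev_check() approves the device, after which every queue is prepared via ice_rxq_vec_setup(); otherwise control falls through to the existing scalar selection. A rough sketch of the kind of per-queue constraints such a check typically enforces (illustrative only; the actual rules live in ice_rx_vec_dev_check):

    #include <stdint.h>

    #define VPMD_RX_BURST 32    /* assumed vector burst size */

    /* Illustrative per-queue vector-eligibility test. */
    static int
    rxq_vec_capable(uint16_t nb_desc, uint16_t rx_free_thresh)
    {
        if (rx_free_thresh < VPMD_RX_BURST)
            return 0;   /* must be able to rearm a full burst */
        if (nb_desc % rx_free_thresh != 0)
            return 0;   /* ring must split into whole rearm chunks */
        return 1;
    }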