remove extra parentheses in return statement
[dpdk.git] / drivers / net / i40e / i40e_rxtx.c
index 8731712..40cffc1 100644
@@ -57,9 +57,6 @@
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
 
-#define I40E_MIN_RING_DESC     64
-#define I40E_MAX_RING_DESC     4096
-#define I40E_ALIGN             128
 #define DEFAULT_TX_RS_THRESH   32
 #define DEFAULT_TX_FREE_THRESH 32
 #define I40E_MAX_PKT_TYPE      256
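
Editor's note: the three ring-size constants removed here are not gone; the later hunks in this diff imply they move to i40e_rxtx.h as I40E_MIN_RING_DESC, I40E_MAX_RING_DESC and the renamed I40E_ALIGN_RING_DESC, which the descriptor-count checks in the queue-setup hunks below reference.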
@@ -68,6 +65,9 @@
 
 #define I40E_DMA_MEM_ALIGN 4096
 
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN   128
+
 #define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
                                        ETH_TXQ_FLAGS_NOOFFLOADS)
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
        ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
 
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
-                          const char *ring_name,
-                          uint16_t queue_id,
-                          uint32_t ring_size,
-                          int socket_id);
 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
                                      struct rte_mbuf **tx_pkts,
                                      uint16_t nb_pkts);
@@ -1489,7 +1483,7 @@ i40e_calc_context_desc(uint64_t flags)
        mask |= PKT_TX_IEEE1588_TMST;
 #endif
 
-       return ((flags & mask) ? 1 : 0);
+       return (flags & mask) ? 1 : 0;
 }
 
 /* set i40e TSO context descriptor */
@@ -2006,7 +2000,8 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
                        i40e_rx_queue_release_mbufs(rxq);
                        i40e_reset_rx_queue(rxq);
-               }
+               } else
+                       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }
 
        return err;
@@ -2035,6 +2030,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                }
                i40e_rx_queue_release_mbufs(rxq);
                i40e_reset_rx_queue(rxq);
+               dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
 
        return 0;
@@ -2060,6 +2056,8 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
                                    tx_queue_id);
+               else
+                       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }
 
        return err;
@@ -2089,6 +2087,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
                i40e_tx_queue_release_mbufs(txq);
                i40e_reset_tx_queue(txq);
+               dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
 
        return 0;
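
Editor's note: taken together, the four hunks above keep dev->data->rx_queue_state[] and tx_queue_state[] in sync with what the driver actually did, which is what the generic ethdev start/stop entry points consult. A minimal application-side sketch, assuming the standard rte_eth_dev_* queue calls of this DPDK generation (uint8_t port ids; this helper is illustrative, not part of the patch):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Toggle one RX queue at runtime; the PMD hunks above are what make
     * the recorded queue state trustworthy after these calls return. */
    static int
    toggle_rx_queue(uint8_t port_id, uint16_t queue_id, int on)
    {
            int ret;

            ret = on ? rte_eth_dev_rx_queue_start(port_id, queue_id)
                     : rte_eth_dev_rx_queue_stop(port_id, queue_id);
            if (ret != 0)
                    printf("rx queue %u %s failed: %d\n", queue_id,
                           on ? "start" : "stop", ret);
            return ret;
    }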
@@ -2110,7 +2109,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        struct i40e_rx_queue *rxq;
        const struct rte_memzone *rz;
        uint32_t ring_size;
-       uint16_t len;
+       uint16_t len, i;
+       uint16_t base, bsf, tc_mapping;
        int use_def_burst_func = 1;
 
        if (hw->mac.type == I40E_MAC_VF) {
@@ -2125,9 +2125,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                            "index exceeds the maximum");
                return I40E_ERR_PARAM;
        }
-       if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
-                                       (nb_desc > I40E_MAX_RING_DESC) ||
-                                       (nb_desc < I40E_MIN_RING_DESC)) {
+       if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+                       (nb_desc > I40E_MAX_RING_DESC) ||
+                       (nb_desc < I40E_MIN_RING_DESC)) {
                PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
                            "invalid", nb_desc);
                return I40E_ERR_PARAM;
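
Editor's note: the old test multiplied nb_desc by the descriptor size and checked the byte total against a 128 B bound; the new one validates the count itself. A compact sketch of the new rule with assumed header values (I40E_ALIGN_RING_DESC = 32, I40E_MIN_RING_DESC = 64, I40E_MAX_RING_DESC = 4096, taken from i40e_rxtx.h rather than this diff):

    /* Returns non-zero when nb_desc is acceptable under the new check. */
    static inline int
    i40e_nb_desc_valid(uint16_t nb_desc)
    {
            return nb_desc % 32 == 0 && nb_desc >= 64 && nb_desc <= 4096;
            /* 512 and 1024 pass; 100 fails the multiple-of-32 test,
             * 32 fails the minimum, 8192 would exceed the maximum. */
    }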
@@ -2147,7 +2147,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "rx queue data structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        rxq->mp = mp;
        rxq->nb_rx_desc = nb_desc;
@@ -2169,26 +2169,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        /* Allocate the maximum number of RX ring hardware descriptors. */

        ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-       rz = i40e_ring_dma_zone_reserve(dev,
-                                       "rx_ring",
-                                       queue_idx,
-                                       ring_size,
-                                       socket_id);
+       rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+                             ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!rz) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-       rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -2206,7 +2198,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (!rxq->sw_ring) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        i40e_reset_rx_queue(rxq);
@@ -2231,6 +2223,19 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                ad->rx_bulk_alloc_allowed = false;
        }
 
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (!(vsi->enabled_tc & (1 << i)))
+                       continue;
+               tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+               base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+                       I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+               bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+                       I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+               if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+                       rxq->dcb_tc = i;
+       }
+
        return 0;
 }
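
Editor's note: each enabled traffic class advertises its queue set in one 16-bit tc_mapping word: the base queue offset in the low bits and the queue count as a power-of-two exponent above it, per the I40E_AQ_VSI_TC_QUE_* masks used in the loop (the identical loop appears again in the TX setup hunk further down). A hedged decode, with assumed mask layout (offset in bits 0-8, count exponent in bits 9-11) and a made-up mapping value:

    /* Sketch only: mask values assumed to match the i40e admin queue
     * definitions (0x1FF for the offset, 0x7 << 9 for the exponent). */
    static inline void
    decode_tc_mapping(uint16_t tc_mapping, uint16_t *base, uint16_t *nb)
    {
            *base = tc_mapping & 0x1FF;            /* first queue of set */
            *nb = 1 << ((tc_mapping >> 9) & 0x7);  /* BIT(bsf) queues    */
    }
    /* e.g. tc_mapping = 0x0604: base = 4, nb = 8, so queues 4..11 belong
     * to this TC and a queue_idx of 6 picks up dcb_tc = i. */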
 
@@ -2323,6 +2328,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        const struct rte_memzone *tz;
        uint32_t ring_size;
        uint16_t tx_rs_thresh, tx_free_thresh;
+       uint16_t i, base, bsf, tc_mapping;
 
        if (hw->mac.type == I40E_MAC_VF) {
                struct i40e_vf *vf =
@@ -2337,9 +2343,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return I40E_ERR_PARAM;
        }
 
-       if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
-                                       (nb_desc > I40E_MAX_RING_DESC) ||
-                                       (nb_desc < I40E_MIN_RING_DESC)) {
+       if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+                       (nb_desc > I40E_MAX_RING_DESC) ||
+                       (nb_desc < I40E_MIN_RING_DESC)) {
                PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
                            "invalid", nb_desc);
                return I40E_ERR_PARAM;
@@ -2431,21 +2437,18 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        if (!txq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "tx queue structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Allocate TX hardware ring descriptors. */
        ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-       tz = i40e_ring_dma_zone_reserve(dev,
-                                       "tx_ring",
-                                       queue_idx,
-                                       ring_size,
-                                       socket_id);
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                             ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!tz) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->nb_tx_desc = nb_desc;
@@ -2466,11 +2469,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->vsi = vsi;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-       txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
        txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
        /* Allocate software ring */
@@ -2482,7 +2481,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        if (!txq->sw_ring) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        i40e_reset_tx_queue(txq);
@@ -2492,6 +2491,19 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        /* Use a simple TX queue without offloads or multi segs if possible */
        i40e_set_tx_function_flag(dev, txq);
 
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (!(vsi->enabled_tc & (1 << i)))
+                       continue;
+               tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
+               base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+                       I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
+               bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+                       I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
+
+               if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
+                       txq->dcb_tc = i;
+       }
+
        return 0;
 }
 
@@ -2510,47 +2522,21 @@ i40e_dev_tx_queue_release(void *txq)
        rte_free(q);
 }
 
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
-                          const char *ring_name,
-                          uint16_t queue_id,
-                          uint32_t ring_size,
-                          int socket_id)
-{
-       char z_name[RTE_MEMZONE_NAMESIZE];
-       const struct rte_memzone *mz;
-
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                       dev->driver->pci_drv.name, ring_name,
-                               dev->data->port_id, queue_id);
-       mz = rte_memzone_lookup(z_name);
-       if (mz)
-               return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-       return rte_memzone_reserve_bounded(z_name, ring_size,
-               socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
-       return rte_memzone_reserve_aligned(z_name, ring_size,
-                               socket_id, 0, I40E_ALIGN);
-#endif
-}
-
 const struct rte_memzone *
 i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
 {
-       const struct rte_memzone *mz = NULL;
+       const struct rte_memzone *mz;
 
        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
-       mz = rte_memzone_reserve_bounded(name, len,
-               socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
-       mz = rte_memzone_reserve_aligned(name, len,
-                               socket_id, 0, I40E_ALIGN);
-#endif
+
+       if (rte_xen_dom0_supported())
+               mz = rte_memzone_reserve_bounded(name, len,
+                               socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
+       else
+               mz = rte_memzone_reserve_aligned(name, len,
+                               socket_id, 0, I40E_RING_BASE_ALIGN);
        return mz;
 }
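
Editor's note: with the driver-private i40e_ring_dma_zone_reserve() gone, ring memory goes through the generic ethdev helper, and the Xen DOM0 special case becomes a runtime rte_xen_dom0_supported() branch instead of a compile-time #ifdef. Roughly, the generic helper behaves like the code it replaces; a sketch reconstructed from the removed function (the real implementation lives in rte_ethdev and may differ in detail):

    static const struct rte_memzone *
    dma_zone_reserve_sketch(const struct rte_eth_dev *dev,
                            const char *ring_name, uint16_t queue_id,
                            size_t size, unsigned align, int socket_id)
    {
            char z_name[RTE_MEMZONE_NAMESIZE];
            const struct rte_memzone *mz;

            /* Same per-port/per-queue naming the removed helper used. */
            snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                     dev->driver->pci_drv.name, ring_name,
                     dev->data->port_id, queue_id);
            mz = rte_memzone_lookup(z_name);
            if (mz)
                    return mz;
            if (rte_xen_dom0_supported())
                    return rte_memzone_reserve_bounded(z_name, size,
                                    socket_id, 0, align, RTE_PGSIZE_2M);
            return rte_memzone_reserve_aligned(z_name, size, socket_id,
                                    0, align);
    }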
 
@@ -2704,7 +2690,7 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
 #ifdef RTE_LIBRTE_IEEE1588
        tx_ctx.timesync_ena = 1;
 #endif
-       tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+       tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
        if (vsi->type == I40E_VSI_FDIR)
                tx_ctx.fd_ena = TRUE;
 
@@ -2970,11 +2956,9 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
 
-       tz = i40e_ring_dma_zone_reserve(dev,
-                                       "fdir_tx_ring",
-                                       I40E_FDIR_QUEUE_ID,
-                                       ring_size,
-                                       SOCKET_ID_ANY);
+       tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+                                     I40E_FDIR_QUEUE_ID, ring_size,
+                                     I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!tz) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
@@ -2986,11 +2970,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        txq->vsi = pf->fdir.fdir_vsi;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-       txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
        txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
        /*
         * don't need to allocate software ring and reset for the fdir
@@ -3030,11 +3010,9 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
 
-       rz = i40e_ring_dma_zone_reserve(dev,
-                                       "fdir_rx_ring",
-                                       I40E_FDIR_QUEUE_ID,
-                                       ring_size,
-                                       SOCKET_ID_ANY);
+       rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+                                     I40E_FDIR_QUEUE_ID, ring_size,
+                                     I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!rz) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
@@ -3046,11 +3024,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        rxq->vsi = pf->fdir.fdir_vsi;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-       rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
        /*
@@ -3063,6 +3037,43 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        return I40E_SUCCESS;
 }
 
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct i40e_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mp;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_drop_en = rxq->drop_en;
+       qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct i40e_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+       qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+       qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+       qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+       qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+       qinfo->conf.txq_flags = txq->txq_flags;
+       qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 void __attribute__((cold))
 i40e_set_rx_function(struct rte_eth_dev *dev)
 {
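
Editor's note: the two new getters plug into the driver's dev_ops table (not shown in this hunk) and surface through rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get(). A small consumer sketch, with example port/queue ids:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    dump_rxq_info(uint8_t port_id, uint16_t queue_id)
    {
            struct rte_eth_rxq_info qinfo;

            /* Dispatches to i40e_rxq_info_get() above for i40e ports. */
            if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
                    return;
            printf("rxq %u: %u desc, scattered=%u, free_thresh=%u\n",
                   queue_id, qinfo.nb_desc, qinfo.scattered_rx,
                   qinfo.conf.rx_free_thresh);
    }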
@@ -3259,4 +3270,3 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
 {
        return 0;
 }
-