drivers/net: fix removing jumbo offload flag
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8489f91..fd066e8 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -490,7 +490,8 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
                                        I40E_RXD_QW1_STATUS_SHIFT;
                }
 
-               rte_smp_rmb();
+               /* This barrier is to order loads of different words in the descriptor */
+               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
                /* Compute how many status bits were set */
                for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
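
The fence replaces the heavier rte_smp_rmb(): the status word (DD bit) is checked first, and the descriptor's other words may be loaded only afterwards, which is exactly load-acquire ordering. A minimal standalone sketch of the same pattern, using a hypothetical two-word descriptor rather than the real i40e layout:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Hypothetical descriptor: hardware writes `length` first, then
     * sets the DD bit (bit 0) in `status`. */
    struct rx_desc {
            volatile uint64_t length;
            volatile uint64_t status;
    };

    /* Return the packet length once the descriptor is complete, -1 otherwise. */
    static inline int64_t
    poll_desc(const struct rx_desc *d)
    {
            if (!(d->status & 1))
                    return -1;
            /* Order the `status` load above before the `length` load below;
             * without the fence the CPU may pair a set DD bit with a stale
             * length from an earlier use of the slot. */
            atomic_thread_fence(memory_order_acquire);
            return (int64_t)d->length;
    }
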
@@ -1941,12 +1942,10 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct i40e_vsi *vsi;
        struct i40e_pf *pf = NULL;
-       struct i40e_vf *vf = NULL;
        struct i40e_rx_queue *rxq;
        const struct rte_memzone *rz;
        uint32_t ring_size;
@@ -1957,22 +1956,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
-       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
-               vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-               vsi = &vf->vsi;
-               if (!vsi)
-                       return -EINVAL;
-               reg_idx = queue_idx;
-       } else {
-               pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-               vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-               if (!vsi)
-                       return -EINVAL;
-               q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
-               if (q_offset < 0)
-                       return -EINVAL;
-               reg_idx = vsi->base_queue + q_offset;
-       }
+       pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+       if (!vsi)
+               return -EINVAL;
+       q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+       if (q_offset < 0)
+               return -EINVAL;
+       reg_idx = vsi->base_queue + q_offset;
 
        if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
            (nb_desc > I40E_MAX_RING_DESC) ||
@@ -1984,7 +1975,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
-               i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               i40e_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
@@ -2028,11 +2019,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                              ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!rz) {
-               i40e_dev_rx_queue_release(rxq);
+               i40e_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
@@ -2048,7 +2040,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!rxq->sw_ring) {
-               i40e_dev_rx_queue_release(rxq);
+               i40e_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
                return -ENOMEM;
        }
@@ -2071,7 +2063,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        if (dev->data->dev_started) {
                if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
-                       i40e_dev_rx_queue_release(rxq);
+                       i40e_rx_queue_release(rxq);
                        return -EINVAL;
                }
        } else {
@@ -2101,7 +2093,19 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 void
-i40e_dev_rx_queue_release(void *rxq)
+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       i40e_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       i40e_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+i40e_rx_queue_release(void *rxq)
 {
        struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
 
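
The renames track the DPDK 21.11 ethdev change that gave the queue-release driver callback a (device, queue id) signature: the old void * helpers live on as i40e_rx_queue_release()/i40e_tx_queue_release(), with thin per-queue wrappers above them. Roughly how the ethdev layer drives the new shape (a simplified sketch, not the actual rte_ethdev.c code):

    /* Hypothetical caller; `dev` is a configured ethdev instance. */
    static void
    eth_rx_queue_free(struct rte_eth_dev *dev, uint16_t qid)
    {
            if (dev->dev_ops->rx_queue_release != NULL)
                    dev->dev_ops->rx_queue_release(dev, qid);
            dev->data->rx_queues[qid] = NULL;
    }
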
@@ -2112,18 +2116,19 @@ i40e_dev_rx_queue_release(void *rxq)
 
        i40e_rx_queue_release_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
 
 uint32_t
-i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+i40e_dev_rx_queue_count(void *rx_queue)
 {
 #define I40E_RXQ_SCAN_INTERVAL 4
        volatile union i40e_rx_desc *rxdp;
        struct i40e_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
        while ((desc < rxq->nb_rx_desc) &&
                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
@@ -2144,32 +2149,6 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        return desc;
 }
 
-int
-i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-       volatile union i40e_rx_desc *rxdp;
-       struct i40e_rx_queue *rxq = rx_queue;
-       uint16_t desc;
-       int ret;
-
-       if (unlikely(offset >= rxq->nb_rx_desc)) {
-               PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
-               return 0;
-       }
-
-       desc = rxq->rx_tail + offset;
-       if (desc >= rxq->nb_rx_desc)
-               desc -= rxq->nb_rx_desc;
-
-       rxdp = &(rxq->rx_ring[desc]);
-
-       ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
-               I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
-                               (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
-
-       return ret;
-}
-
 int
 i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
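
Both descriptor-inspection paths change together: the rx_queue_count callback now takes the queue pointer directly (the public rte_eth_rx_queue_count() keeps its port/queue-id form), and the deprecated descriptor-done op is dropped in favor of rte_eth_rx_descriptor_status(), whose i40e implementation continues below. A sketch of the surviving application-level checks:

    #include <rte_ethdev.h>

    /* True once the descriptor `offset` entries past the next-to-receive
     * slot has been written back; replaces the removed done-check. */
    static int
    rx_desc_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
    {
            return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
                   RTE_ETH_RX_DESC_DONE;
    }

    /* Filled descriptors on the ring; a negative return means bad ids or
     * an unsupported driver. */
    static uint32_t
    ring_fill_level(uint16_t port_id, uint16_t queue_id)
    {
            int n = rte_eth_rx_queue_count(port_id, queue_id);
            return n < 0 ? 0 : (uint32_t)n;
    }
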
@@ -2281,10 +2260,8 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
        struct i40e_pf *pf = NULL;
-       struct i40e_vf *vf = NULL;
        struct i40e_tx_queue *txq;
        const struct rte_memzone *tz;
        uint32_t ring_size;
@@ -2295,20 +2272,14 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
-       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
-               vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-               vsi = &vf->vsi;
-               reg_idx = queue_idx;
-       } else {
-               pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-               vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-               if (!vsi)
-                       return -EINVAL;
-               q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
-               if (q_offset < 0)
-                       return -EINVAL;
-               reg_idx = vsi->base_queue + q_offset;
-       }
+       pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+       if (!vsi)
+               return -EINVAL;
+       q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+       if (q_offset < 0)
+               return -EINVAL;
+       reg_idx = vsi->base_queue + q_offset;
 
        if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
            (nb_desc > I40E_MAX_RING_DESC) ||
@@ -2406,7 +2377,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Free memory if needed. */
        if (dev->data->tx_queues[queue_idx]) {
-               i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               i40e_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
@@ -2427,11 +2398,12 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                              ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!tz) {
-               i40e_dev_tx_queue_release(txq);
+               i40e_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = nb_desc;
        txq->tx_rs_thresh = tx_rs_thresh;
        txq->tx_free_thresh = tx_free_thresh;
@@ -2455,7 +2427,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!txq->sw_ring) {
-               i40e_dev_tx_queue_release(txq);
+               i40e_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
                return -ENOMEM;
        }
@@ -2478,7 +2450,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        if (dev->data->dev_started) {
                if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
-                       i40e_dev_tx_queue_release(txq);
+                       i40e_tx_queue_release(txq);
                        return -EINVAL;
                }
        } else {
@@ -2494,7 +2466,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 void
-i40e_dev_tx_queue_release(void *txq)
+i40e_tx_queue_release(void *txq)
 {
        struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
 
@@ -2505,6 +2477,7 @@ i40e_dev_tx_queue_release(void *txq)
 
        i40e_tx_queue_release_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
 
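
Storing the memzone in the queue and freeing it here plugs a leak: a queue released individually (for example on reconfiguration) previously left its DMA memzone behind, since only i40e_dev_free_queues() called rte_eth_dma_zone_free(). The ownership pattern in isolation (a sketch; treating rte_memzone_free() as safe on a NULL pointer is an assumption):

    #include <rte_memzone.h>

    struct demo_ring {
            const struct rte_memzone *mz; /* set when the ring is reserved */
    };

    static void
    demo_ring_free(struct demo_ring *r)
    {
            rte_memzone_free(r->mz);
            r->mz = NULL;
    }
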
@@ -2589,6 +2562,10 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
        rxq->rx_tail = 0;
        rxq->nb_rx_hold = 0;
+
+       if (rxq->pkt_first_seg != NULL)
+               rte_pktmbuf_free(rxq->pkt_first_seg);
+
        rxq->pkt_first_seg = NULL;
        rxq->pkt_last_seg = NULL;
 
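
Resetting a queue midway through a scattered receive used to leak the partially assembled chain hanging off pkt_first_seg; freeing the head is enough because rte_pktmbuf_free() walks the whole segment list. A minimal sketch, assuming an initialized mbuf mempool mp:

    #include <rte_mbuf.h>

    static void
    free_chain_demo(struct rte_mempool *mp)
    {
            struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
            struct rte_mbuf *seg = rte_pktmbuf_alloc(mp);

            if (head == NULL || seg == NULL) {
                    rte_pktmbuf_free(head); /* NULL-safe */
                    rte_pktmbuf_free(seg);
                    return;
            }
            head->next = seg;
            head->nb_segs = 2;
            rte_pktmbuf_free(head); /* one call frees both segments */
    }
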
@@ -2922,28 +2899,15 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
        }
 
        rxq->max_pkt_len =
-               RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
-                       rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
-       if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
-                       rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
-                       PMD_DRV_LOG(ERR, "maximum packet length must "
-                                   "be larger than %u and smaller than %u,"
-                                   "as jumbo frame is enabled",
-                                   (uint32_t)I40E_ETH_MAX_LEN,
-                                   (uint32_t)I40E_FRAME_SIZE_MAX);
-                       return I40E_ERR_CONFIG;
-               }
-       } else {
-               if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
-                       rxq->max_pkt_len > I40E_ETH_MAX_LEN) {
-                       PMD_DRV_LOG(ERR, "maximum packet length must be "
-                                   "larger than %u and smaller than %u, "
-                                   "as jumbo frame is disabled",
-                                   (uint32_t)RTE_ETHER_MIN_LEN,
-                                   (uint32_t)I40E_ETH_MAX_LEN);
-                       return I40E_ERR_CONFIG;
-               }
+               RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
+                               data->mtu + I40E_ETH_OVERHEAD);
+       if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+               rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+               PMD_DRV_LOG(ERR, "maximum packet length must be "
+                           "larger than %u and smaller than %u",
+                           (uint32_t)RTE_ETHER_MIN_LEN,
+                           (uint32_t)I40E_FRAME_SIZE_MAX);
+               return I40E_ERR_CONFIG;
        }
 
        return 0;
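
With the jumbo flag gone there is a single validity window: max_pkt_len is derived from the configured MTU plus the fixed L2 overhead the device must accept. Illustrative arithmetic (the overhead mirrors what i40e's I40E_ETH_OVERHEAD accounts for, Ethernet header, CRC and two VLAN tags; treat the exact constant as an assumption):

    #include <stdint.h>

    #define HDR_LEN  14U /* Ethernet header */
    #define CRC_LEN   4U
    #define VLAN_TAG  4U

    static inline uint32_t
    mtu_to_frame_len(uint16_t mtu)
    {
            return (uint32_t)mtu + HDR_LEN + CRC_LEN + 2U * VLAN_TAG;
    }
    /* mtu_to_frame_len(1500) == 1526; a 9000-byte MTU yields 9026, inside
     * the 9728-byte I40E_FRAME_SIZE_MAX bound checked above. */
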
@@ -3055,17 +3019,15 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!dev->data->rx_queues[i])
                        continue;
-               i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+               i40e_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (!dev->data->tx_queues[i])
                        continue;
-               i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+               i40e_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
 }
 
@@ -3103,11 +3065,12 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
                                      I40E_FDIR_QUEUE_ID, ring_size,
                                      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!tz) {
-               i40e_dev_tx_queue_release(txq);
+               i40e_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
                return I40E_ERR_NO_MEMORY;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
        txq->queue_id = I40E_FDIR_QUEUE_ID;
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -3161,11 +3124,12 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
                                      I40E_FDIR_QUEUE_ID, ring_size,
                                      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!rz) {
-               i40e_dev_rx_queue_release(rxq);
+               i40e_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
                return I40E_ERR_NO_MEMORY;
        }
 
+       rxq->mz = rz;
        rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
        rxq->queue_id = I40E_FDIR_QUEUE_ID;
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -3223,10 +3187,10 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.offloads = txq->offloads;
 }
 
+#ifdef RTE_ARCH_X86
 static inline bool
 get_avx_supported(bool request_avx512)
 {
-#ifdef RTE_ARCH_X86
        if (request_avx512) {
                if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
                rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
@@ -3250,12 +3214,10 @@ get_avx_supported(bool request_avx512)
                return false;
 #endif
        }
-#else
-       RTE_SET_USED(request_avx512);
-#endif /* RTE_ARCH_X86 */
 
        return false;
 }
+#endif /* RTE_ARCH_X86 */
 
 
 void __rte_cold