ethdev: remove legacy Rx descriptor done API
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 45da4ed..7b2a6b0 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -112,6 +112,7 @@ struct igb_rx_queue {
        uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
        uint32_t            flags;      /**< RX flags. */
        uint64_t            offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
+       const struct rte_memzone *mz;  /**< memzone for the RX descriptor ring. */
 };
 
 /**
@@ -186,6 +187,7 @@ struct igb_tx_queue {
        struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
        /**< Hardware context history.*/
        uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
+       const struct rte_memzone *mz;  /**< memzone for the TX descriptor ring. */
 };
 
 #if 1
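
Why the new mz fields: rte_eth_dma_zone_reserve() hands back the memzone that
backs a queue's descriptor ring, but the driver used to free ring zones only
by name from igb_dev_free_queues(), so a ring could leak when a queue was
released on its own (e.g. on queue re-setup). Storing the pointer lets each
release path free exactly the zone it reserved, which is also why the
rte_eth_dma_zone_free() calls disappear further down. A minimal sketch of the
reserve-store-free pattern, assuming a DPDK 21.11-era driver environment; the
example_* names are illustrative, not part of this patch:

#include <errno.h>
#include <ethdev_driver.h>
#include <rte_memzone.h>

struct example_queue {
	const struct rte_memzone *mz; /* descriptor ring memzone */
};

static int
example_ring_reserve(struct rte_eth_dev *dev, struct example_queue *q,
		     uint16_t queue_idx, size_t ring_size)
{
	const struct rte_memzone *rz;

	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, ring_size,
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (rz == NULL)
		return -ENOMEM;

	q->mz = rz; /* remember the zone so release can free it by pointer */
	return 0;
}

static void
example_ring_release(struct example_queue *q)
{
	rte_memzone_free(q->mz); /* frees exactly what was reserved */
	q->mz = NULL;
}
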
@@ -1276,14 +1278,15 @@ igb_tx_queue_release(struct igb_tx_queue *txq)
        if (txq != NULL) {
                igb_tx_queue_release_mbufs(txq);
                rte_free(txq->sw_ring);
+               rte_memzone_free(txq->mz);
                rte_free(txq);
        }
 }
 
 void
-eth_igb_tx_queue_release(void *txq)
+eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       igb_tx_queue_release(txq);
+       igb_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 static int
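
The signature change above follows the ethdev 21.11 queue-release rework:
eth_queue_release_t callbacks now receive the device and queue index instead
of a bare queue pointer. A hedged sketch of how a caller drives the new
callback; example_free_rx_queues() is illustrative, the real caller lives
inside lib/ethdev:

#include <ethdev_driver.h>

static void
example_free_rx_queues(struct rte_eth_dev *dev)
{
	uint16_t qid;

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		if (dev->dev_ops->rx_queue_release != NULL)
			dev->dev_ops->rx_queue_release(dev, qid);
		dev->data->rx_queues[qid] = NULL;
	}
}
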
@@ -1545,6 +1548,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = nb_desc;
        txq->pthresh = tx_conf->tx_thresh.pthresh;
        txq->hthresh = tx_conf->tx_thresh.hthresh;
@@ -1601,14 +1605,15 @@ igb_rx_queue_release(struct igb_rx_queue *rxq)
        if (rxq != NULL) {
                igb_rx_queue_release_mbufs(rxq);
                rte_free(rxq->sw_ring);
+               rte_memzone_free(rxq->mz);
                rte_free(rxq);
        }
 }
 
 void
-eth_igb_rx_queue_release(void *rxq)
+eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       igb_rx_queue_release(rxq);
+       igb_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 static void
@@ -1746,6 +1751,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
                igb_rx_queue_release(rxq);
                return -ENOMEM;
        }
+
+       rxq->mz = rz;
        rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
        rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
        rxq->rx_ring_phys_addr = rz->iova;
@@ -1791,23 +1798,6 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        return desc;
 }
 
-int
-eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-       volatile union e1000_adv_rx_desc *rxdp;
-       struct igb_rx_queue *rxq = rx_queue;
-       uint32_t desc;
-
-       if (unlikely(offset >= rxq->nb_rx_desc))
-               return 0;
-       desc = rxq->rx_tail + offset;
-       if (desc >= rxq->nb_rx_desc)
-               desc -= rxq->nb_rx_desc;
-
-       rxdp = &rxq->rx_ring[desc];
-       return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
-}
-
 int
 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
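
The deleted eth_igb_rx_descriptor_done() backed the deprecated
rte_eth_rx_descriptor_done() API. Its replacement is
rte_eth_rx_descriptor_status(), whose igb callback survives just below and
which reports AVAIL/DONE/UNAVAIL rather than a bare DD-bit test. A sketch of
the application-side migration; example_rx_desc_done() is an illustrative
name:

#include <rte_ethdev.h>

static int
example_rx_desc_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int rc = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	/* The old API returned 1 once the DD bit was set; DONE is the
	 * equivalent status. Negative values are errors. */
	return rc == RTE_ETH_RX_DESC_DONE;
}
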
@@ -1883,16 +1873,14 @@ igb_dev_free_queues(struct rte_eth_dev *dev)
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+               eth_igb_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+               eth_igb_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
        dev->data->nb_tx_queues = 0;
 }
@@ -2343,15 +2331,19 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure support of jumbo frames, if any.
         */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+               uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
                rctl |= E1000_RCTL_LPE;
 
                /*
                 * Set maximum packet length by default, and might be updated
                 * together with enabling/disabling dual VLAN.
                 */
-               E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                               VLAN_TAG_SIZE);
+               if (dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_VLAN_EXTEND)
+                       max_len += VLAN_TAG_SIZE;
+
+               E1000_WRITE_REG(hw, E1000_RLPML, max_len);
        } else
                rctl &= ~E1000_RCTL_LPE;
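
The RLPML write used to add VLAN_TAG_SIZE unconditionally, so the NIC would
accept frames 4 bytes larger than max_rx_pkt_len even when no second VLAN tag
is possible. After this change the pad is applied only when dual VLAN (QinQ,
DEV_RX_OFFLOAD_VLAN_EXTEND) is enabled: with max_rx_pkt_len = 9000 the
register gets 9000 without QinQ and 9004 with it. A sketch of the derivation;
example_rlpml() is an illustrative helper, with VLAN_TAG_SIZE (4) taken from
the driver's e1000_ethdev.h and the rxmode fields from the pre-21.11
rte_ethdev.h:

#include <rte_ethdev.h>

#define VLAN_TAG_SIZE 4 /* as defined in e1000_ethdev.h */

static uint32_t
example_rlpml(const struct rte_eth_rxmode *rxmode)
{
	uint32_t max_len = rxmode->max_rx_pkt_len;

	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		max_len += VLAN_TAG_SIZE; /* room for the extra outer tag */

	return max_len;
}
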