e1000: get queue info and descriptor limits
author    Konstantin Ananyev <konstantin.ananyev@intel.com>
          Tue, 27 Oct 2015 12:51:46 +0000 (12:51 +0000)
committer Thomas Monjalon <thomas.monjalon@6wind.com>
          Sun, 1 Nov 2015 23:14:00 +0000 (00:14 +0100)
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Remy Horton <remy.horton@intel.com>
drivers/net/e1000/e1000_ethdev.h
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/em_rxtx.c
drivers/net/e1000/igb_ethdev.c
drivers/net/e1000/igb_rxtx.c

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
        ETH_RSS_IPV6_TCP_EX | \
        ETH_RSS_IPV6_UDP_EX)
 
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define        E1000_MIN_RING_DESC     32
+#define        E1000_MAX_RING_DESC     4096
+
+/*
+ * TDBA/RDBA should be aligned on a 16 byte boundary, but TDLEN/RDLEN should
+ * be a multiple of 128 bytes. So we align TDBA/RDBA on a 128 byte boundary.
+ * This also optimizes the cache line size effect;
+ * H/W supports cache line sizes up to 128 bytes.
+ */
+#define        E1000_ALIGN     128
+
+#define        IGB_RXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define        IGB_TXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define        EM_RXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define        EM_TXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
        uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
 
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);
 
 #endif /* _E1000_ETHDEV_H_ */
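
[Editor's note] The new XXX_ALIGN macros turn the old byte-level check into a simple
descriptor-count check. A minimal standalone sketch of that arithmetic, assuming the
usual 16-byte e1000 descriptor layout (so each alignment works out to 8 descriptors;
desc_size is a hypothetical stand-in for sizeof(union e1000_adv_rx_desc)):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	#define ALIGN_BYTES 128	/* RDLEN/TDLEN granularity */

	int main(void)
	{
		/* Assumed stand-in for sizeof(union e1000_adv_rx_desc). */
		const size_t desc_size = 16;
		const uint16_t rxd_align = ALIGN_BYTES / desc_size;	/* == 8 */
		uint16_t nb_desc = 512;

		/* nb_desc % rxd_align == 0 is equivalent to the old byte test: */
		assert(nb_desc % rxd_align == 0);
		assert((nb_desc * desc_size) % ALIGN_BYTES == 0);
		return 0;
	}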
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 2354544..a009bc2 100644
@@ -175,6 +175,8 @@ static const struct eth_dev_ops eth_em_ops = {
        .mac_addr_add         = eth_em_rar_set,
        .mac_addr_remove      = eth_em_rar_clear,
        .set_mc_addr_list     = eth_em_set_mc_addr_list,
+       .rxq_info_get         = em_rxq_info_get,
+       .txq_info_get         = em_txq_info_get,
 };
 
 /**
@@ -1014,6 +1016,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->max_rx_queues = 1;
        dev_info->max_tx_queues = 1;
+
+       dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = E1000_MAX_RING_DESC,
+               .nb_min = E1000_MIN_RING_DESC,
+               .nb_align = EM_RXD_ALIGN,
+       };
+
+       dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = E1000_MAX_RING_DESC,
+               .nb_min = E1000_MIN_RING_DESC,
+               .nb_align = EM_TXD_ALIGN,
+       };
 }
 
 /* return 0 means link status changed, -1 means not changed */
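
[Editor's note] With the limits now filled in by eth_em_infos_get(), an application
can size its rings against what the PMD advertises instead of hard-coding driver
constants. A hedged sketch (pick_nb_rxd() is a hypothetical helper; the 2.x-era
ethdev API used a uint8_t port id):

	#include <rte_ethdev.h>

	static uint16_t
	pick_nb_rxd(uint8_t port_id, uint16_t wanted)
	{
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(port_id, &dev_info);

		/* Clamp into the advertised range, then round down to nb_align. */
		if (wanted > dev_info.rx_desc_lim.nb_max)
			wanted = dev_info.rx_desc_lim.nb_max;
		if (wanted < dev_info.rx_desc_lim.nb_min)
			wanted = dev_info.rx_desc_lim.nb_min;
		wanted -= wanted % dev_info.rx_desc_lim.nb_align;
		return wanted;
	}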
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return (nb_rx);
 }
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define        EM_MAX_BUF_SIZE     16384
 #define EM_RCTL_FLXBUF_STEP 1024
 
@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
        /*
         * Validate number of transmit descriptors.
         * It must not exceed hardware maximum, and must be multiple
-        * of EM_ALIGN.
+        * of E1000_ALIGN.
         */
-       if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
-                       (nb_desc > EM_MAX_RING_DESC) ||
-                       (nb_desc < EM_MIN_RING_DESC)) {
+       if (nb_desc % EM_TXD_ALIGN != 0 ||
+                       (nb_desc > E1000_MAX_RING_DESC) ||
+                       (nb_desc < E1000_MIN_RING_DESC)) {
                return -(EINVAL);
        }
 
@@ -1272,7 +1252,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
-       tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
+       tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
        if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
                        socket_id)) == NULL)
                return (-ENOMEM);
@@ -1375,11 +1355,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
        /*
         * Validate number of receive descriptors.
         * It must not exceed hardware maximum, and must be multiple
-        * of EM_ALIGN.
+        * of E1000_ALIGN.
         */
-       if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
-                       (nb_desc > EM_MAX_RING_DESC) ||
-                       (nb_desc < EM_MIN_RING_DESC)) {
+       if (nb_desc % EM_RXD_ALIGN != 0 ||
+                       (nb_desc > E1000_MAX_RING_DESC) ||
+                       (nb_desc < E1000_MIN_RING_DESC)) {
                return (-EINVAL);
        }
 
@@ -1399,7 +1379,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* Allocate RX ring for max possible number of hardware descriptors. */
-       rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
+       rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
        if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
                        socket_id)) == NULL)
                return (-ENOMEM);
@@ -1881,3 +1861,34 @@ eth_em_tx_init(struct rte_eth_dev *dev)
        /* This write will effectively turn on the transmit unit. */
        E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct em_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mb_pool;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct em_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+       qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+       qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+       qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+       qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
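
[Editor's note] These driver callbacks are reached through the generic
rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() entry points added in the
same patch series. A hedged usage sketch (dump_rxq() is a hypothetical helper):

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	dump_rxq(uint8_t port_id, uint16_t queue_id)
	{
		struct rte_eth_rxq_info qinfo;

		if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
			return;

		printf("rxq %u: %u descriptors, scattered=%u, free_thresh=%u\n",
		       queue_id, qinfo.nb_desc, qinfo.scattered_rx,
		       qinfo.conf.rx_free_thresh);
	}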
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 7a8fa93..c2b92f8 100644
@@ -280,6 +280,18 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {
 {0},
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+       .nb_max = E1000_MAX_RING_DESC,
+       .nb_min = E1000_MIN_RING_DESC,
+       .nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+       .nb_max = E1000_MAX_RING_DESC,
+       .nb_min = E1000_MIN_RING_DESC,
+       .nb_align = IGB_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops eth_igb_ops = {
        .dev_configure        = eth_igb_configure,
        .dev_start            = eth_igb_start,
@@ -318,6 +330,8 @@ static const struct eth_dev_ops eth_igb_ops = {
        .rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
        .filter_ctrl          = eth_igb_filter_ctrl,
        .set_mc_addr_list     = eth_igb_set_mc_addr_list,
+       .rxq_info_get         = igb_rxq_info_get,
+       .txq_info_get         = igb_txq_info_get,
        .timesync_enable      = igb_timesync_enable,
        .timesync_disable     = igb_timesync_disable,
        .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -348,6 +362,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .tx_queue_setup       = eth_igb_tx_queue_setup,
        .tx_queue_release     = eth_igb_tx_queue_release,
        .set_mc_addr_list     = eth_igb_set_mc_addr_list,
+       .rxq_info_get         = igb_rxq_info_get,
+       .txq_info_get         = igb_txq_info_get,
        .mac_addr_set         = igbvf_default_mac_addr_set,
        .get_reg_length       = igbvf_get_reg_length,
        .get_reg              = igbvf_get_regs,
@@ -1654,6 +1670,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .txq_flags = 0,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 static void
@@ -1706,6 +1725,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .txq_flags = 0,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index d734a19..384e4f1 100644
@@ -1202,16 +1202,6 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return (nb_rx);
 }
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
 /*
  * Maximum number of Ring Descriptors.
  *
@@ -1219,9 +1209,6 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  * descriptors should meet the following condition:
  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
  */
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
                      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -1238,10 +1225,10 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 
 #ifdef RTE_LIBRTE_XEN_DOM0
        return rte_memzone_reserve_bounded(z_name, ring_size,
-                       socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
+                       socket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);
 #else
        return rte_memzone_reserve_aligned(z_name, ring_size,
-                       socket_id, 0, IGB_ALIGN);
+                       socket_id, 0, E1000_ALIGN);
 #endif
 }
 
@@ -1337,10 +1324,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
        /*
         * Validate number of transmit descriptors.
         * It must not exceed hardware maximum, and must be multiple
-        * of IGB_ALIGN.
+        * of E1000_ALIGN.
         */
-       if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
-           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+       if (nb_desc % IGB_TXD_ALIGN != 0 ||
+                       (nb_desc > E1000_MAX_RING_DESC) ||
+                       (nb_desc < E1000_MIN_RING_DESC)) {
                return -EINVAL;
        }
 
@@ -1376,7 +1364,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
-       size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+       size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
        tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                        size, socket_id);
        if (tz == NULL) {
@@ -1485,10 +1473,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        /*
         * Validate number of receive descriptors.
         * It must not exceed hardware maximum, and must be multiple
-        * of IGB_ALIGN.
+        * of E1000_ALIGN.
         */
-       if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
-           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+       if (nb_desc % IGB_RXD_ALIGN != 0 ||
+                       (nb_desc > E1000_MAX_RING_DESC) ||
+                       (nb_desc < E1000_MIN_RING_DESC)) {
                return (-EINVAL);
        }
 
@@ -1524,7 +1513,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
         *  handle the maximum ring size is allocated in order to allow for
         *  resizing in later calls to the queue setup function.
         */
-       size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+       size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
        rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
        if (rz == NULL) {
                igb_rx_queue_release(rxq);
@@ -2537,3 +2526,34 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
        }
 
 }
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct igb_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mb_pool;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct igb_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+       qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+       qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
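
[Editor's note] For context, the generic layer's dispatch to these callbacks looks
roughly like the sketch below. This is a simplified rendering that omits the
port-validation macros of the real rte_ethdev.c, not a verbatim copy:

	#include <errno.h>
	#include <string.h>
	#include <rte_ethdev.h>

	int
	rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
	{
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		if (qinfo == NULL || queue_id >= dev->data->nb_rx_queues)
			return -EINVAL;
		if (dev->dev_ops->rxq_info_get == NULL)
			return -ENOTSUP;	/* PMD did not register the callback */

		memset(qinfo, 0, sizeof(*qinfo));
		dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
		return 0;
	}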