ixgbe: get queue info and descriptor limits
Author:     Konstantin Ananyev <konstantin.ananyev@intel.com>
AuthorDate: Tue, 27 Oct 2015 12:51:45 +0000 (12:51 +0000)
Commit:     Thomas Monjalon <thomas.monjalon@6wind.com>
CommitDate: Sun, 1 Nov 2015 23:13:59 +0000 (00:13 +0100)
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Remy Horton <remy.horton@intel.com>
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/ixgbe/ixgbe_rxtx.h
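
The rxq_info_get/txq_info_get callbacks wired up below are reached through the
generic ethdev wrappers rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get()
added in the same release. A minimal application-side sketch of that use,
assuming the DPDK 2.2-era API where port_id is a uint8_t (the dump_ixgbe_rxq()
helper is illustrative, not part of this patch):

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* Print the per-queue RX parameters this patch starts reporting. */
	static void
	dump_ixgbe_rxq(uint8_t port_id, uint16_t queue_id)
	{
		struct rte_eth_rxq_info qinfo;

		if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
			return;	/* unsupported PMD or invalid queue id */

		printf("rxq %u: nb_desc=%u scattered_rx=%u free_thresh=%u pool=%s\n",
			queue_id, qinfo.nb_desc, qinfo.scattered_rx,
			qinfo.conf.rx_free_thresh,
			qinfo.mp != NULL ? qinfo.mp->name : "(none)");
	}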

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index e9ce466..38d44d4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -388,6 +388,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {
 
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+       .nb_max = IXGBE_MAX_RING_DESC,
+       .nb_min = IXGBE_MIN_RING_DESC,
+       .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+       .nb_max = IXGBE_MAX_RING_DESC,
+       .nb_min = IXGBE_MIN_RING_DESC,
+       .nb_align = IXGBE_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
@@ -458,6 +470,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+       .rxq_info_get         = ixgbe_rxq_info_get,
+       .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -497,6 +511,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+       .rxq_info_get         = ixgbe_rxq_info_get,
+       .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg_length       = ixgbevf_get_reg_length,
        .get_reg              = ixgbevf_get_regs,
@@ -2580,6 +2596,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
+
        dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
        dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
@@ -2634,6 +2654,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
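
With rx_desc_lim/tx_desc_lim now filled in, an application can size its rings
from the reported limits instead of hard-coding ixgbe's 32/4096/alignment
rules. A sketch of that use, assuming the PMD reports a non-zero nb_align (as
ixgbe does here); fit_ring_size() is a hypothetical helper:

	#include <rte_ethdev.h>

	/* Clamp a requested ring size to the PMD-reported descriptor limits. */
	static uint16_t
	fit_ring_size(const struct rte_eth_desc_lim *lim, uint16_t req)
	{
		if (req > lim->nb_max)
			req = lim->nb_max;
		req -= req % lim->nb_align;	/* round down to a legal multiple */
		if (req < lim->nb_min)
			req = lim->nb_min;
		return req;
	}

After rte_eth_dev_info_get(port_id, &dev_info), a call such as
nb_rxd = fit_ring_size(&dev_info.rx_desc_lim, nb_rxd) yields a value that
passes the nb_desc checks in ixgbe_dev_rx_queue_setup() below.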
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index ccef592..569d678 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -357,6 +357,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index ad66b09..5d7924c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1820,25 +1820,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  *
  **********************************************************************/
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
 /*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
@@ -2007,9 +1988,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
         * It must not exceed hardware maximum, and must be multiple
         * of IXGBE_ALIGN.
         */
-       if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
-           (nb_desc > IXGBE_MAX_RING_DESC) ||
-           (nb_desc < IXGBE_MIN_RING_DESC)) {
+       if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+                       (nb_desc > IXGBE_MAX_RING_DESC) ||
+                       (nb_desc < IXGBE_MIN_RING_DESC)) {
                return -EINVAL;
        }
 
@@ -2374,9 +2355,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
         * It must not exceed hardware maximum, and must be multiple
         * of IXGBE_ALIGN.
         */
-       if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
-           (nb_desc > IXGBE_MAX_RING_DESC) ||
-           (nb_desc < IXGBE_MIN_RING_DESC)) {
+       if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+                       (nb_desc > IXGBE_MAX_RING_DESC) ||
+                       (nb_desc < IXGBE_MIN_RING_DESC)) {
                return (-EINVAL);
        }
 
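Worked equivalence for the rewritten checks: both advanced descriptor unions
are 16 bytes and IXGBE_ALIGN is 128, so the new per-descriptor test is exactly
the old byte-based test expressed in units of descriptors:

	(nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN == 0
	<=>	(nb_desc * 16) % 128 == 0
	<=>	nb_desc % 8 == 0	/* 8 == IXGBE_RXD_ALIGN == IXGBE_TXD_ALIGN */
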
@@ -4684,6 +4665,43 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return 0;
 }
 
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct ixgbe_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mb_pool;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_drop_en = rxq->drop_en;
+       qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct ixgbe_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+       qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+       qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+       qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+       qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+       qinfo->conf.txq_flags = txq->txq_flags;
+       qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
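
Note that the new callbacks skip all argument checking: that happens in the
generic layer before dispatch. A paraphrase of the ethdev-side wrapper (an
assumption about rte_ethdev.c in this release, not code from this patch),
which explains why ixgbe_rxq_info_get() may dereference
dev->data->rx_queues[queue_id] unconditionally:

	#include <string.h>
	#include <rte_ethdev.h>

	int
	rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
	{
		/* the real wrapper also validates port_id; omitted here */
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		if (qinfo == NULL || queue_id >= dev->data->nb_rx_queues)
			return -EINVAL;
		if (dev->dev_ops->rxq_info_get == NULL)
			return -ENOTSUP;	/* PMD left the callback unset */

		memset(qinfo, 0, sizeof(*qinfo));
		dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
		return 0;
	}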
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index b9eca67..475a800 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
 #ifndef _IXGBE_RXTX_H_
 #define _IXGBE_RXTX_H_
 
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define        IXGBE_ALIGN     128
+
+#define IXGBE_RXD_ALIGN        (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
+#define IXGBE_TXD_ALIGN        (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define        IXGBE_MIN_RING_DESC     32
+#define        IXGBE_MAX_RING_DESC     4096
 
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
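
Since the new alignment macros are derived from sizeof(), a compile-time guard
can pin down the values they are expected to produce. A sketch, assuming
RTE_BUILD_BUG_ON from <rte_common.h> (function-scope only; this guard is not
part of the patch):

	static inline void
	ixgbe_desc_lim_sanity(void)
	{
		/* 16-byte descriptors give an alignment of 8 descriptors */
		RTE_BUILD_BUG_ON(sizeof(union ixgbe_adv_rx_desc) != 16);
		RTE_BUILD_BUG_ON(sizeof(union ixgbe_adv_tx_desc) != 16);
		RTE_BUILD_BUG_ON(IXGBE_RXD_ALIGN != 8);
		RTE_BUILD_BUG_ON(IXGBE_TXD_ALIGN != 8);
		/* the min/max ring sizes remain multiples of that alignment */
		RTE_BUILD_BUG_ON(IXGBE_MIN_RING_DESC % IXGBE_RXD_ALIGN != 0);
		RTE_BUILD_BUG_ON(IXGBE_MAX_RING_DESC % IXGBE_RXD_ALIGN != 0);
	}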