        .udp_tunnel_add               = i40e_dev_udp_tunnel_add,
        .udp_tunnel_del               = i40e_dev_udp_tunnel_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
+       .rxq_info_get                 = i40e_rxq_info_get,
+       .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
 
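+       /* HW limits on descriptor ring size, reported via rte_eth_dev_info_get(). */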
+       dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = I40E_MAX_RING_DESC,
+               .nb_min = I40E_MIN_RING_DESC,
+               .nb_align = I40E_ALIGN_RING_DESC,
+       };
+
+       dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = I40E_MAX_RING_DESC,
+               .nb_min = I40E_MIN_RING_DESC,
+               .nb_align = I40E_ALIGN_RING_DESC,
+       };
+
        if (pf->flags & I40E_FLAG_VMDQ) {
                dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
                dev_info->vmdq_queue_base = dev_info->max_rx_queues;
 
                          enum rte_filter_op filter_op,
                          void *arg);
 
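+/* Per-queue info getters, hooked to the rxq_info_get/txq_info_get dev_ops. */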
+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
        (&((struct i40e_adapter *)adapter)->pf)
 
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
+
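+       /* HW limits on descriptor ring size, reported via rte_eth_dev_info_get(). */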
+       dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = I40E_MAX_RING_DESC,
+               .nb_min = I40E_MIN_RING_DESC,
+               .nb_align = I40E_ALIGN_RING_DESC,
+       };
+
+       dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+               .nb_max = I40E_MAX_RING_DESC,
+               .nb_min = I40E_MIN_RING_DESC,
+               .nb_align = I40E_ALIGN_RING_DESC,
+       };
 }
 
 static void
 
        return I40E_SUCCESS;
 }
 
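+/*
+ * Fill qinfo with the configuration and state of the given RX queue.
+ * Invoked via the rxq_info_get dev op from rte_eth_rx_queue_info_get().
+ */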
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct i40e_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mp;
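+       /* Scattered RX is a per-device flag, not tracked per queue. */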
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_drop_en = rxq->drop_en;
+       qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
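+/*
+ * Fill qinfo with the configuration of the given TX queue.
+ * Invoked via the txq_info_get dev op from rte_eth_tx_queue_info_get().
+ */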
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct i40e_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+       qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+       qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+       qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+       qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+       qinfo->conf.txq_flags = txq->txq_flags;
+       qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 void __attribute__((cold))
 i40e_set_rx_function(struct rte_eth_dev *dev)
 {