net/thunderx: add statistics
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 1bea851..e20f0d9 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -143,6 +143,49 @@ nicvf_dev_link_update(struct rte_eth_dev *dev,
        return nicvf_atomic_write_link_status(dev, &link);
 }
 
+static int
+nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       PMD_INIT_FUNC_TRACE();
+
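+       /* MTU plus L2 header and CRC must fit the hardware FRS limits */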
+       if (frame_size > NIC_HW_MAX_FRS)
+               return -EINVAL;
+
+       if (frame_size < NIC_HW_MIN_FRS)
+               return -EINVAL;
+
+       buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+       /*
+        * Refuse an MTU that would require scattered packet support
+        * when that feature has not been enabled beforehand.
+        */
+       if (!dev->data->scattered_rx &&
+               (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
+               return -EINVAL;
+
+       /* check <seg size> * <max_seg>  >= max_frame */
+       if (dev->data->scattered_rx &&
+               (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
+               return -EINVAL;
+
+       if (frame_size > ETHER_MAX_LEN)
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+       else
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+       if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+               return -EINVAL;
+
+       /* Update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+       nic->mtu = mtu;
+       return 0;
+}
+
 static int
 nicvf_dev_get_reg_length(struct rte_eth_dev *dev  __rte_unused)
 {
@@ -168,6 +211,551 @@ nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
        return -ENOTSUP;
 }
 
+static void
+nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       uint16_t qidx;
+       struct nicvf_hw_rx_qstats rx_qstats;
+       struct nicvf_hw_tx_qstats tx_qstats;
+       struct nicvf_hw_stats port_stats;
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
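+       /* Per-queue stats are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS queues */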
+       /* Reading per RX ring stats */
+       for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+               if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+                       break;
+
+               nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
+               stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
+               stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
+       }
+
+       /* Reading per TX ring stats */
+       for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
+               if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+                       break;
+
+               nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
+               stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
+               stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
+       }
+
+       nicvf_hw_get_stats(nic, &port_stats);
+       stats->ibytes = port_stats.rx_bytes;
+       stats->ipackets = port_stats.rx_ucast_frames;
+       stats->ipackets += port_stats.rx_bcast_frames;
+       stats->ipackets += port_stats.rx_mcast_frames;
+       stats->ierrors = port_stats.rx_l2_errors;
+       stats->imissed = port_stats.rx_drop_red;
+       stats->imissed += port_stats.rx_drop_overrun;
+       stats->imissed += port_stats.rx_drop_bcast;
+       stats->imissed += port_stats.rx_drop_mcast;
+       stats->imissed += port_stats.rx_drop_l3_bcast;
+       stats->imissed += port_stats.rx_drop_l3_mcast;
+
+       stats->obytes = port_stats.tx_bytes_ok;
+       stats->opackets = port_stats.tx_ucast_frames_ok;
+       stats->opackets += port_stats.tx_bcast_frames_ok;
+       stats->opackets += port_stats.tx_mcast_frames_ok;
+       stats->oerrors = port_stats.tx_drops;
+}
+
+static void
+nicvf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+       int i;
+       uint16_t rxqs = 0, txqs = 0;
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
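+       /* Two counters (packets, bytes) per queue, hence 2 mask bits each */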
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               rxqs |= (0x3 << (i * 2));
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               txqs |= (0x3 << (i * 2));
+
+       nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+}
+
+/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
+static void
+nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static inline uint64_t
+nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
+{
+       uint64_t nic_rss = 0;
+
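+       /* IPv4 and IPv6 share the single RSS_IP_ENA hardware flag */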
+       if (ethdev_rss & ETH_RSS_IPV4)
+               nic_rss |= RSS_IP_ENA;
+
+       if (ethdev_rss & ETH_RSS_IPV6)
+               nic_rss |= RSS_IP_ENA;
+
+       if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+               nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
+
+       if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+               nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
+
+       if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+               nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
+
+       if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+               nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
+
+       if (ethdev_rss & ETH_RSS_PORT)
+               nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
+
+       if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+               if (ethdev_rss & ETH_RSS_VXLAN)
+                       nic_rss |= RSS_TUN_VXLAN_ENA;
+
+               if (ethdev_rss & ETH_RSS_GENEVE)
+                       nic_rss |= RSS_TUN_GENEVE_ENA;
+
+               if (ethdev_rss & ETH_RSS_NVGRE)
+                       nic_rss |= RSS_TUN_NVGRE_ENA;
+       }
+
+       return nic_rss;
+}
+
+static inline uint64_t
+nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
+{
+       uint64_t ethdev_rss = 0;
+
+       if (nic_rss & RSS_IP_ENA)
+               ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+
+       if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
+               ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
+                               ETH_RSS_NONFRAG_IPV6_TCP);
+
+       if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
+               ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
+                               ETH_RSS_NONFRAG_IPV6_UDP);
+
+       if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
+               ethdev_rss |= ETH_RSS_PORT;
+
+       if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+               if (nic_rss & RSS_TUN_VXLAN_ENA)
+                       ethdev_rss |= ETH_RSS_VXLAN;
+
+               if (nic_rss & RSS_TUN_GENEVE_ENA)
+                       ethdev_rss |= ETH_RSS_GENEVE;
+
+               if (nic_rss & RSS_TUN_NVGRE_ENA)
+                       ethdev_rss |= ETH_RSS_NVGRE;
+       }
+       return ethdev_rss;
+}
+
+static int
+nicvf_dev_reta_query(struct rte_eth_dev *dev,
+                    struct rte_eth_rss_reta_entry64 *reta_conf,
+                    uint16_t reta_size)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+       int ret, i, j;
+
+       if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
+               RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can support "
+                       "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+               return -EINVAL;
+       }
+
+       ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+       if (ret)
+               return ret;
+
+       /* Copy RETA table */
+       for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
+               for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+                       if ((reta_conf[i].mask >> j) & 0x01)
+                               reta_conf[i].reta[j] =
+                                       tbl[(i * RTE_RETA_GROUP_SIZE) + j];
+       }
+
+       return 0;
+}
+
+static int
+nicvf_dev_reta_update(struct rte_eth_dev *dev,
+                     struct rte_eth_rss_reta_entry64 *reta_conf,
+                     uint16_t reta_size)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+       int ret, i, j;
+
+       if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
+               RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can support "
+                       "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+               return -EINVAL;
+       }
+
+       ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+       if (ret)
+               return ret;
+
+       /* Copy RETA table */
+       for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
+               for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+                       if ((reta_conf[i].mask >> j) & 0x01)
+                               tbl[(i * RTE_RETA_GROUP_SIZE) + j] =
+                                       reta_conf[i].reta[j];
+       }
+
+       return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+static int
+nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+                           struct rte_eth_rss_conf *rss_conf)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
+       if (rss_conf->rss_key)
+               nicvf_rss_get_key(nic, rss_conf->rss_key);
+
+       rss_conf->rss_key_len =  RSS_HASH_KEY_BYTE_SIZE;
+       rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
+       return 0;
+}
+
+static int
+nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_conf *rss_conf)
+{
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint64_t nic_rss;
+
+       if (rss_conf->rss_key &&
+               rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
+               RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
+                               rss_conf->rss_key_len);
+               return -EINVAL;
+       }
+
+       if (rss_conf->rss_key)
+               nicvf_rss_set_key(nic, rss_conf->rss_key);
+
+       nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
+       nicvf_rss_set_cfg(nic, nic_rss);
+       return 0;
+}
+
+static int
+nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
+                   uint32_t desc_cnt)
+{
+       const struct rte_memzone *rz;
+       uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);
+
+       rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
+                                       NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
+       if (rz == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
+               return -ENOMEM;
+       }
+
+       memset(rz->addr, 0, ring_size);
+
+       rxq->phys = rz->phys_addr;
+       rxq->desc = rz->addr;
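+       /* desc_cnt is rounded up to a power of two, so a mask suffices */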
+       rxq->qlen_mask = desc_cnt - 1;
+
+       return 0;
+}
+
+static int
+nicvf_qset_sq_alloc(struct nicvf *nic,  struct nicvf_txq *sq, uint16_t qidx,
+                   uint32_t desc_cnt)
+{
+       const struct rte_memzone *rz;
+       uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);
+
+       rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
+                               NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
+       if (rz == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
+               return -ENOMEM;
+       }
+
+       memset(rz->addr, 0, ring_size);
+
+       sq->phys = rz->phys_addr;
+       sq->desc = rz->addr;
+       sq->qlen_mask = desc_cnt - 1;
+
+       return 0;
+}
+
+static inline void
+nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
+{
+       uint32_t head;
+
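+       /* Free mbufs still held by the sw ring between head and tail */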
+       head = txq->head;
+       while (head != txq->tail) {
+               if (txq->txbuffs[head]) {
+                       rte_pktmbuf_free_seg(txq->txbuffs[head]);
+                       txq->txbuffs[head] = NULL;
+               }
+               head++;
+               head = head & txq->qlen_mask;
+       }
+}
+
+static void
+nicvf_tx_queue_reset(struct nicvf_txq *txq)
+{
+       uint32_t txq_desc_cnt = txq->qlen_mask + 1;
+
+       memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
+       memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
+       txq->tail = 0;
+       txq->head = 0;
+       txq->xmit_bufs = 0;
+}
+
+static void
+nicvf_dev_tx_queue_release(void *sq)
+{
+       struct nicvf_txq *txq;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = (struct nicvf_txq *)sq;
+       if (txq) {
+               if (txq->txbuffs != NULL) {
+                       nicvf_tx_queue_release_mbufs(txq);
+                       rte_free(txq->txbuffs);
+                       txq->txbuffs = NULL;
+               }
+               rte_free(txq);
+       }
+}
+
+static int
+nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+                        uint16_t nb_desc, unsigned int socket_id,
+                        const struct rte_eth_txconf *tx_conf)
+{
+       uint16_t tx_free_thresh;
+       uint8_t is_single_pool;
+       struct nicvf_txq *txq;
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Socket id check */
+       if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
+               PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
+               socket_id, nic->node);
+
+       /* Tx deferred start is not supported */
+       if (tx_conf->tx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Tx deferred start not supported");
+               return -EINVAL;
+       }
+
+       /* Roundup nb_desc to available qsize and validate max number of desc */
+       nb_desc = nicvf_qsize_sq_roundup(nb_desc);
+       if (nb_desc == 0) {
+               PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
+               return -EINVAL;
+       }
+
+       /* Validate tx_free_thresh */
+       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+                               tx_conf->tx_free_thresh :
+                               NICVF_DEFAULT_TX_FREE_THRESH);
+
+       if (tx_free_thresh > (nb_desc) ||
+               tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
+               PMD_INIT_LOG(ERR,
+                       "tx_free_thresh must be less than the number of TX "
+                       "descriptors. (tx_free_thresh=%u port=%d "
+                       "queue=%d)", (unsigned int)tx_free_thresh,
+                       (int)dev->data->port_id, (int)qidx);
+               return -EINVAL;
+       }
+
+       /* Free memory prior to re-allocation if needed. */
+       if (dev->data->tx_queues[qidx] != NULL) {
+               PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+                               qidx);
+               nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+               dev->data->tx_queues[qidx] = NULL;
+       }
+
+       /* Allocating tx queue data structure */
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
+                                       RTE_CACHE_LINE_SIZE, nic->node);
+       if (txq == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
+               return -ENOMEM;
+       }
+
+       txq->nic = nic;
+       txq->queue_id = qidx;
+       txq->tx_free_thresh = tx_free_thresh;
+       txq->txq_flags = tx_conf->txq_flags;
+       txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
+       txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
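+       /* Single pool: no refcounting and all mbufs from one mempool */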
+       is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
+                               txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+
+       /* Choose optimum free threshold value for multipool case */
+       if (!is_single_pool) {
+               txq->tx_free_thresh = (uint16_t)
+               (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
+                               NICVF_TX_FREE_MPOOL_THRESH :
+                               tx_conf->tx_free_thresh);
+       }
+
+       /* Allocate software ring */
+       txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
+                               nb_desc * sizeof(struct rte_mbuf *),
+                               RTE_CACHE_LINE_SIZE, nic->node);
+
+       if (txq->txbuffs == NULL) {
+               nicvf_dev_tx_queue_release(txq);
+               return -ENOMEM;
+       }
+
+       if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
+               PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
+               nicvf_dev_tx_queue_release(txq);
+               return -ENOMEM;
+       }
+
+       nicvf_tx_queue_reset(txq);
+
+       PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+                       qidx, txq, nb_desc, txq->desc, txq->phys);
+
+       dev->data->tx_queues[qidx] = txq;
+       dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
+}
+
+static void
+nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
+{
+       rxq->head = 0;
+       rxq->available_space = 0;
+       rxq->recv_buffers = 0;
+}
+
+static void
+nicvf_dev_rx_queue_release(void *rx_queue)
+{
+       struct nicvf_rxq *rxq = rx_queue;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (rxq)
+               rte_free(rxq);
+}
+
+static int
+nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+                        uint16_t nb_desc, unsigned int socket_id,
+                        const struct rte_eth_rxconf *rx_conf,
+                        struct rte_mempool *mp)
+{
+       uint16_t rx_free_thresh;
+       struct nicvf_rxq *rxq;
+       struct nicvf *nic = nicvf_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Socket id check */
+       if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
+               PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
+               socket_id, nic->node);
+
+       /* Mempool memory should be contiguous */
+       if (mp->nb_mem_chunks != 1) {
+               PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
+               return -EINVAL;
+       }
+
+       /* Rx deferred start is not supported */
+       if (rx_conf->rx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Rx deferred start not supported");
+               return -EINVAL;
+       }
+
+       /* Roundup nb_desc to available qsize and validate max number of desc */
+       nb_desc = nicvf_qsize_cq_roundup(nb_desc);
+       if (nb_desc == 0) {
+               PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
+               return -EINVAL;
+       }
+
+       /* Check rx_free_thresh upper bound */
+       rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
+                               rx_conf->rx_free_thresh :
+                               NICVF_DEFAULT_RX_FREE_THRESH);
+       if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
+               rx_free_thresh >= nb_desc * .75) {
+               PMD_INIT_LOG(ERR, "rx_free_thresh %d exceeds allowed maximum",
+                               rx_free_thresh);
+               return -EINVAL;
+       }
+
+       /* Free memory prior to re-allocation if needed */
+       if (dev->data->rx_queues[qidx] != NULL) {
+               PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+                               qidx);
+               nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
+               dev->data->rx_queues[qidx] = NULL;
+       }
+
+       /* Allocate rxq memory */
+       rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
+                                       RTE_CACHE_LINE_SIZE, nic->node);
+       if (rxq == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
+               return -ENOMEM;
+       }
+
+       rxq->nic = nic;
+       rxq->pool = mp;
+       rxq->queue_id = qidx;
+       rxq->port_id = dev->data->port_id;
+       rxq->rx_free_thresh = rx_free_thresh;
+       rxq->rx_drop_en = rx_conf->rx_drop_en;
+       rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
+       rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
+       rxq->precharge_cnt = 0;
+       rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
+       /* Alloc completion queue */
+       if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
+               PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
+               nicvf_dev_rx_queue_release(rxq);
+               return -ENOMEM;
+       }
+
+       nicvf_rx_queue_reset(rxq);
+
+       PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
+                       qidx, rxq, mp->name, nb_desc,
+                       rte_mempool_count(mp), rxq->phys);
+
+       dev->data->rx_queues[qidx] = rxq;
+       dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
+}
+
 static void
 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -293,7 +881,19 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 static const struct eth_dev_ops nicvf_eth_dev_ops = {
        .dev_configure            = nicvf_dev_configure,
        .link_update              = nicvf_dev_link_update,
+       .stats_get                = nicvf_dev_stats_get,
+       .stats_reset              = nicvf_dev_stats_reset,
+       .promiscuous_enable       = nicvf_dev_promisc_enable,
        .dev_infos_get            = nicvf_dev_info_get,
+       .mtu_set                  = nicvf_dev_set_mtu,
+       .reta_update              = nicvf_dev_reta_update,
+       .reta_query               = nicvf_dev_reta_query,
+       .rss_hash_update          = nicvf_dev_rss_hash_update,
+       .rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
+       .rx_queue_setup           = nicvf_dev_rx_queue_setup,
+       .rx_queue_release         = nicvf_dev_rx_queue_release,
+       .tx_queue_setup           = nicvf_dev_tx_queue_setup,
+       .tx_queue_release         = nicvf_dev_tx_queue_release,
        .get_reg_length           = nicvf_dev_get_reg_length,
        .get_reg                  = nicvf_dev_get_regs,
 };