net: add rte prefix to ether structures
[dpdk.git] / drivers / net / ena / ena_ethdev.c
index 3647788..1d832f9 100644 (file)
@@ -31,6 +31,7 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
+#include <rte_string_fns.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
@@ -53,9 +54,9 @@
 #include <ena_admin_defs.h>
 #include <ena_eth_io_defs.h>
 
-#define DRV_MODULE_VER_MAJOR   1
-#define DRV_MODULE_VER_MINOR   1
-#define DRV_MODULE_VER_SUBMINOR        1
+#define DRV_MODULE_VER_MAJOR   2
+#define DRV_MODULE_VER_MINOR   0
+#define DRV_MODULE_VER_SUBMINOR        0
 
 #define ENA_IO_TXQ_IDX(q)      (2 * (q))
 #define ENA_IO_RXQ_IDX(q)      (2 * (q) + 1)
@@ -80,7 +81,6 @@
 #define ENA_RX_RSS_TABLE_LOG_SIZE  7
 #define ENA_RX_RSS_TABLE_SIZE  (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
 #define ENA_HASH_KEY_SIZE      40
-#define ENA_ETH_SS_STATS       0xFF
 #define ETH_GSTRING_LEN        32
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -97,11 +97,6 @@ struct ena_stats {
        int stat_offset;
 };
 
-#define ENA_STAT_ENA_COM_ENTRY(stat) { \
-       .name = #stat, \
-       .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
-}
-
 #define ENA_STAT_ENTRY(stat, stat_type) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
@@ -126,54 +121,36 @@ struct ena_stats {
 uint32_t ena_alloc_cnt;
 
 static const struct ena_stats ena_stats_global_strings[] = {
-       ENA_STAT_GLOBAL_ENTRY(tx_timeout),
-       ENA_STAT_GLOBAL_ENTRY(io_suspend),
-       ENA_STAT_GLOBAL_ENTRY(io_resume),
        ENA_STAT_GLOBAL_ENTRY(wd_expired),
-       ENA_STAT_GLOBAL_ENTRY(interface_up),
-       ENA_STAT_GLOBAL_ENTRY(interface_down),
-       ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+       ENA_STAT_GLOBAL_ENTRY(dev_start),
+       ENA_STAT_GLOBAL_ENTRY(dev_stop),
 };
 
 static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(cnt),
        ENA_STAT_TX_ENTRY(bytes),
-       ENA_STAT_TX_ENTRY(queue_stop),
-       ENA_STAT_TX_ENTRY(queue_wakeup),
-       ENA_STAT_TX_ENTRY(dma_mapping_err),
+       ENA_STAT_TX_ENTRY(prepare_ctx_err),
        ENA_STAT_TX_ENTRY(linearize),
        ENA_STAT_TX_ENTRY(linearize_failed),
        ENA_STAT_TX_ENTRY(tx_poll),
        ENA_STAT_TX_ENTRY(doorbells),
-       ENA_STAT_TX_ENTRY(prepare_ctx_err),
-       ENA_STAT_TX_ENTRY(missing_tx_comp),
        ENA_STAT_TX_ENTRY(bad_req_id),
+       ENA_STAT_TX_ENTRY(available_desc),
 };
 
 static const struct ena_stats ena_stats_rx_strings[] = {
        ENA_STAT_RX_ENTRY(cnt),
        ENA_STAT_RX_ENTRY(bytes),
-       ENA_STAT_RX_ENTRY(refil_partial),
+       ENA_STAT_RX_ENTRY(refill_partial),
        ENA_STAT_RX_ENTRY(bad_csum),
-       ENA_STAT_RX_ENTRY(page_alloc_fail),
-       ENA_STAT_RX_ENTRY(skb_alloc_fail),
-       ENA_STAT_RX_ENTRY(dma_mapping_err),
+       ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
        ENA_STAT_RX_ENTRY(bad_desc_num),
-       ENA_STAT_RX_ENTRY(small_copy_len_pkt),
-};
-
-static const struct ena_stats ena_stats_ena_com_strings[] = {
-       ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
-       ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
-       ENA_STAT_ENA_COM_ENTRY(completed_cmd),
-       ENA_STAT_ENA_COM_ENTRY(out_of_space),
-       ENA_STAT_ENA_COM_ENTRY(no_completion),
+       ENA_STAT_RX_ENTRY(bad_req_id),
 };
 
 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
 #define ENA_STATS_ARRAY_TX     ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX     ARRAY_SIZE(ena_stats_rx_strings)
-#define ENA_STATS_ARRAY_ENA_COM        ARRAY_SIZE(ena_stats_ena_com_strings)
 
 #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
                        DEV_TX_OFFLOAD_UDP_CKSUM |\
@@ -259,11 +236,20 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 static int ena_rss_reta_query(struct rte_eth_dev *dev,
                              struct rte_eth_rss_reta_entry64 *reta_conf,
                              uint16_t reta_size);
-static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
 static void ena_interrupt_handler_rte(void *cb_arg);
 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
 static void ena_destroy_device(struct rte_eth_dev *eth_dev);
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
+static int ena_xstats_get_names(struct rte_eth_dev *dev,
+                               struct rte_eth_xstat_name *xstats_names,
+                               unsigned int n);
+static int ena_xstats_get(struct rte_eth_dev *dev,
+                         struct rte_eth_xstat *stats,
+                         unsigned int n);
+static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
+                               const uint64_t *ids,
+                               uint64_t *values,
+                               unsigned int n);
 
 static const struct eth_dev_ops ena_dev_ops = {
        .dev_configure        = ena_dev_configure,
@@ -274,6 +260,9 @@ static const struct eth_dev_ops ena_dev_ops = {
        .dev_stop             = ena_stop,
        .link_update          = ena_link_update,
        .stats_get            = ena_stats_get,
+       .xstats_get_names     = ena_xstats_get_names,
+       .xstats_get           = ena_xstats_get,
+       .xstats_get_by_id     = ena_xstats_get_by_id,
        .mtu_set              = ena_mtu_set,
        .rx_queue_release     = ena_rx_queue_release,
        .tx_queue_release     = ena_tx_queue_release,
@@ -390,6 +379,7 @@ static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
 
        rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
        rx_ring->adapter->trigger_reset = true;
+       ++rx_ring->rx_stats.bad_req_id;
 
        return -EFAULT;
 }
@@ -410,6 +400,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
                RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
 
        /* Trigger device reset */
+       ++tx_ring->tx_stats.bad_req_id;
        tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
        tx_ring->adapter->trigger_reset = true;
        return -EFAULT;
@@ -431,13 +422,11 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
        host_info->os_type = ENA_ADMIN_OS_DPDK;
        host_info->kernel_ver = RTE_VERSION;
-       snprintf((char *)host_info->kernel_ver_str,
-                sizeof(host_info->kernel_ver_str),
-                "%s", rte_version());
+       strlcpy((char *)host_info->kernel_ver_str, rte_version(),
+               sizeof(host_info->kernel_ver_str));
        host_info->os_dist = RTE_VERSION;
-       snprintf((char *)host_info->os_dist_str,
-                sizeof(host_info->os_dist_str),
-                "%s", rte_version());
+       strlcpy((char *)host_info->os_dist_str, rte_version(),
+               sizeof(host_info->os_dist_str));
        host_info->driver_version =
                (DRV_MODULE_VER_MAJOR) |
                (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -461,24 +450,12 @@ err:
        ena_com_delete_host_info(ena_dev);
 }
 
-static int
-ena_get_sset_count(struct rte_eth_dev *dev, int sset)
+/* This function calculates the number of xstats based on the current config */
+static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
 {
-       if (sset != ETH_SS_STATS)
-               return -EOPNOTSUPP;
-
-        /* Workaround for clang:
-        * touch internal structures to prevent
-        * compiler error
-        */
-       ENA_TOUCH(ena_stats_global_strings);
-       ENA_TOUCH(ena_stats_tx_strings);
-       ENA_TOUCH(ena_stats_rx_strings);
-       ENA_TOUCH(ena_stats_ena_com_strings);
-
-       return  dev->data->nb_tx_queues *
-               (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
-               ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+       return ENA_STATS_ARRAY_GLOBAL +
+               (dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
+               (dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
 }
 
 static void ena_config_debug_area(struct ena_adapter *adapter)
@@ -486,11 +463,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
        u32 debug_area_size;
        int rc, ss_count;
 
-       ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
-       if (ss_count <= 0) {
-               RTE_LOG(ERR, PMD, "SS count is negative\n");
-               return;
-       }
+       ss_count = ena_xstats_calc_num(adapter->rte_dev);
 
        /* allocate 32 bytes for each string and 64bit for the value */
        debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
@@ -961,6 +934,7 @@ static void ena_stats_restart(struct rte_eth_dev *dev)
        rte_atomic64_init(&adapter->drv_stats->ierrors);
        rte_atomic64_init(&adapter->drv_stats->oerrors);
        rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
+       rte_atomic64_init(&adapter->drv_stats->rx_drops);
 }
 
 static int ena_stats_get(struct rte_eth_dev *dev,
@@ -971,6 +945,8 @@ static int ena_stats_get(struct rte_eth_dev *dev,
                (struct ena_adapter *)(dev->data->dev_private);
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        int rc;
+       int i;
+       int max_rings_stats;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -ENOTSUP;
@@ -991,13 +967,33 @@ static int ena_stats_get(struct rte_eth_dev *dev,
                                        ena_stats.rx_bytes_low);
        stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
                                        ena_stats.tx_bytes_low);
-       stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
-                                        ena_stats.rx_drops_low);
 
        /* Driver related stats */
+       stats->imissed = rte_atomic64_read(&adapter->drv_stats->rx_drops);
        stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
        stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
        stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+
+       max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
+               RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (i = 0; i < max_rings_stats; ++i) {
+               struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
+
+               stats->q_ibytes[i] = rx_stats->bytes;
+               stats->q_ipackets[i] = rx_stats->cnt;
+               stats->q_errors[i] = rx_stats->bad_desc_num +
+                       rx_stats->bad_req_id;
+       }
+
+       max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
+               RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (i = 0; i < max_rings_stats; ++i) {
+               struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
+
+               stats->q_obytes[i] = tx_stats->bytes;
+               stats->q_opackets[i] = tx_stats->cnt;
+       }
+
        return 0;
 }
 
@@ -1066,6 +1062,7 @@ static int ena_start(struct rte_eth_dev *dev)
        rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
                        ena_timer_wd_callback, adapter);
 
+       ++adapter->dev_stats.dev_start;
        adapter->state = ENA_ADAPTER_STATE_RUNNING;
 
        return 0;
@@ -1094,6 +1091,7 @@ static void ena_stop(struct rte_eth_dev *dev)
                        RTE_LOG(ERR, PMD, "Device reset failed rc=%d\n", rc);
        }
 
+       ++adapter->dev_stats.dev_stop;
        adapter->state = ENA_ADAPTER_STATE_STOPPED;
 }
 
@@ -1205,8 +1203,11 @@ static int ena_queue_start(struct ena_ring *ring)
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
 
-       if (ring->type == ENA_RING_TYPE_TX)
+       if (ring->type == ENA_RING_TYPE_TX) {
+               ring->tx_stats.available_desc =
+                       ena_com_free_desc(ring->ena_com_io_sq);
                return 0;
+       }
 
        bufs_num = ring->ring_size - 1;
        rc = ena_populate_rx_queue(ring, bufs_num);
@@ -1298,7 +1299,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                txq->offloads =
                        tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
        }
-
        /* Store pointer to this queue in upper layer */
        txq->configured = 1;
        dev->data->tx_queues[queue_idx] = txq;
@@ -1410,6 +1410,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
        rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
        if (unlikely(rc < 0)) {
                rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
+               ++rxq->rx_stats.mbuf_alloc_fail;
                PMD_RX_LOG(DEBUG, "there are no enough free buffers");
                return 0;
        }
@@ -1447,6 +1448,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
                        "buffers (from %d)\n", rxq->id, i, count);
                rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
                                     count - i);
+               ++rxq->rx_stats.refill_partial;
        }
 
        /* When we submitted free recources to device... */
@@ -1577,6 +1579,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
                RTE_LOG(ERR, PMD, "Keep alive timeout\n");
                adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
                adapter->trigger_reset = true;
+               ++adapter->dev_stats.wd_expired;
        }
 }
 
@@ -1683,8 +1686,7 @@ static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
 
-       io_queue_num = RTE_MIN(rte_lcore_count(), ENA_MAX_NUM_IO_QUEUES);
-       io_queue_num = RTE_MIN(io_queue_num, io_rx_num);
+       io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
        io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num);
        io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num);
 
@@ -1712,19 +1714,20 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        static int adapters_found;
        bool wd_state;
 
-       memset(adapter, 0, sizeof(struct ena_adapter));
-       ena_dev = &adapter->ena_dev;
-
        eth_dev->dev_ops = &ena_dev_ops;
        eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
        eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
-       adapter->rte_eth_dev_data = eth_dev->data;
-       adapter->rte_dev = eth_dev;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       memset(adapter, 0, sizeof(struct ena_adapter));
+       ena_dev = &adapter->ena_dev;
+
+       adapter->rte_eth_dev_data = eth_dev->data;
+       adapter->rte_dev = eth_dev;
+
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        adapter->pdev = pci_dev;
 
@@ -1800,14 +1803,19 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        /* Set max MTU for this device */
        adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
 
-       /* set device support for TSO */
-       adapter->tso4_supported = get_feat_ctx.offload.tx &
-                                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+       /* set device support for offloads */
+       adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
+               ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
+       adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
+               ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
+       adapter->offloads.rx_csum_supported =
+               (get_feat_ctx.offload.rx_supported &
+               ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
 
        /* Copy MAC address and point DPDK to it */
-       eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
-       ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
-                       (struct ether_addr *)adapter->mac_addr);
+       eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
+       ether_addr_copy((struct rte_ether_addr *)get_feat_ctx.dev_attr.mac_addr,
+                       (struct rte_ether_addr *)adapter->mac_addr);
 
        /*
         * Pass the information to the rte_eth_dev_close() that it should also
@@ -1935,9 +1943,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 {
        struct ena_adapter *adapter;
        struct ena_com_dev *ena_dev;
-       struct ena_com_dev_get_features_ctx feat;
        uint64_t rx_feat = 0, tx_feat = 0;
-       int rc = 0;
 
        ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
        ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
@@ -1956,26 +1962,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
                        ETH_LINK_SPEED_50G  |
                        ETH_LINK_SPEED_100G;
 
-       /* Get supported features from HW */
-       rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
-       if (unlikely(rc)) {
-               RTE_LOG(ERR, PMD,
-                       "Cannot get attribute for ena device rc= %d\n", rc);
-               return;
-       }
-
        /* Set Tx & Rx features available for device */
-       if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+       if (adapter->offloads.tso4_supported)
                tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
 
-       if (feat.offload.tx &
-           ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+       if (adapter->offloads.tx_csum_supported)
                tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
                        DEV_TX_OFFLOAD_UDP_CKSUM |
                        DEV_TX_OFFLOAD_TCP_CKSUM;
 
-       if (feat.offload.rx_supported &
-           ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+       if (adapter->offloads.rx_csum_supported)
                rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
                        DEV_RX_OFFLOAD_UDP_CKSUM  |
                        DEV_RX_OFFLOAD_TCP_CKSUM;
@@ -2062,6 +2058,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        rx_ring->adapter->reset_reason =
                                ENA_REGS_RESET_TOO_MANY_RX_DESCS;
                        rx_ring->adapter->trigger_reset = true;
+                       ++rx_ring->rx_stats.bad_desc_num;
                        return 0;
                }
 
@@ -2105,19 +2102,28 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
                /* fill mbuf attributes if any */
                ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
+
+               if (unlikely(mbuf_head->ol_flags &
+                       (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)))
+                       ++rx_ring->rx_stats.bad_csum;
+
                mbuf_head->hash.rss = ena_rx_ctx.hash;
 
                /* pass to DPDK application head mbuf */
                rx_pkts[recv_idx] = mbuf_head;
                recv_idx++;
+               rx_ring->rx_stats.bytes += mbuf_head->pkt_len;
        }
 
+       rx_ring->rx_stats.cnt += recv_idx;
        rx_ring->next_to_clean = next_to_clean;
 
        desc_in_use = desc_in_use - completed + 1;
        /* Burst refill to save doorbells, memory barriers, const interval */
-       if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
+       if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) {
+               ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
                ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
+       }
 
        return recv_idx;
 }
@@ -2145,7 +2151,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * length of the ethernet header.
                 */
                if (unlikely(m->l2_len == 0))
-                       m->l2_len = sizeof(struct ether_hdr);
+                       m->l2_len = sizeof(struct rte_ether_hdr);
 
                ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
                                                 m->l2_len);
@@ -2157,21 +2163,21 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /* If IPv4 header has DF flag enabled and TSO support is
                         * disabled, partial chcecksum should not be calculated.
                         */
-                       if (!tx_ring->adapter->tso4_supported)
+                       if (!tx_ring->adapter->offloads.tso4_supported)
                                continue;
                }
 
                if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
                                (ol_flags & PKT_TX_L4_MASK) ==
                                PKT_TX_SCTP_CKSUM) {
-                       rte_errno = -ENOTSUP;
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
@@ -2184,7 +2190,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                ret = rte_net_intel_cksum_flags_prepare(m,
                        ol_flags & ~PKT_TX_TCP_SEG);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
@@ -2233,9 +2239,14 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
            (header_len < tx_ring->tx_max_header_size))
                return 0;
 
+       ++tx_ring->tx_stats.linearize;
        rc = rte_pktmbuf_linearize(mbuf);
-       if (unlikely(rc))
+       if (unlikely(rc)) {
                RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+               rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
+               ++tx_ring->tx_stats.linearize_failed;
+               return rc;
+       }
 
        return rc;
 }
@@ -2258,6 +2269,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t push_len = 0;
        uint16_t delta = 0;
        int nb_hw_desc;
+       uint32_t total_length;
 
        /* Check adapter state */
        if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
@@ -2272,6 +2284,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
                mbuf = tx_pkts[sent_idx];
+               total_length = 0;
 
                rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
                if (unlikely(rc))
@@ -2337,6 +2350,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        ebuf++;
                        tx_info->num_of_bufs++;
                }
+               total_length += mbuf->data_len;
 
                while ((mbuf = mbuf->next) != NULL) {
                        seg_len = mbuf->data_len;
@@ -2349,6 +2363,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                        ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
                        ebuf->len = seg_len - delta;
+                       total_length += ebuf->len;
                        ebuf++;
                        tx_info->num_of_bufs++;
 
@@ -2369,20 +2384,25 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                /* prepare the packet's descriptors to dma engine */
                rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
                                        &ena_tx_ctx, &nb_hw_desc);
-               if (unlikely(rc))
+               if (unlikely(rc)) {
+                       ++tx_ring->tx_stats.prepare_ctx_err;
                        break;
-
+               }
                tx_info->tx_descs = nb_hw_desc;
 
                next_to_use++;
+               tx_ring->tx_stats.cnt += tx_info->num_of_bufs;
+               tx_ring->tx_stats.bytes += total_length;
        }
+       tx_ring->tx_stats.available_desc =
+               ena_com_free_desc(tx_ring->ena_com_io_sq);
 
        /* If there are ready packets to be xmitted... */
        if (sent_idx > 0) {
                /* ...let HW do its best :-) */
                rte_wmb();
                ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
-
+               tx_ring->tx_stats.doorbells++;
                tx_ring->next_to_use = next_to_use;
        }
 
@@ -2409,16 +2429,177 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
                        break;
        }
+       tx_ring->tx_stats.available_desc =
+               ena_com_free_desc(tx_ring->ena_com_io_sq);
 
        if (total_tx_descs > 0) {
                /* acknowledge completion of sent packets */
-               ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
                tx_ring->next_to_clean = next_to_clean;
+               ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+               ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
        }
 
+       tx_ring->tx_stats.tx_poll++;
+
        return sent_idx;
 }
 
+/**
+ * DPDK callback to retrieve names of extended device statistics
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ *   Buffer to insert names into.
+ * @param n
+ *   Number of names.
+ *
+ * @return
+ *   Number of xstats names.
+ */
+static int ena_xstats_get_names(struct rte_eth_dev *dev,
+                               struct rte_eth_xstat_name *xstats_names,
+                               unsigned int n)
+{
+       unsigned int xstats_count = ena_xstats_calc_num(dev);
+       unsigned int stat, i, count = 0;
+
+       if (n < xstats_count || !xstats_names)
+               return xstats_count;
+
+       for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
+               strlcpy(xstats_names[count].name,
+                       ena_stats_global_strings[stat].name,
+                       sizeof(xstats_names[count].name));
+
+       for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
+               for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
+                       snprintf(xstats_names[count].name,
+                               sizeof(xstats_names[count].name),
+                               "rx_q%d_%s", i,
+                               ena_stats_rx_strings[stat].name);
+
+       for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
+               for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
+                       snprintf(xstats_names[count].name,
+                               sizeof(xstats_names[count].name),
+                               "tx_q%d_%s", i,
+                               ena_stats_tx_strings[stat].name);
+
+       return xstats_count;
+}
+
+/**
+ * DPDK callback to get extended device statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] stats
+ *   Stats table output buffer.
+ * @param n
+ *   The size of the stats table.
+ *
+ * @return
+ *   Number of xstats on success, negative on failure.
+ */
+static int ena_xstats_get(struct rte_eth_dev *dev,
+                         struct rte_eth_xstat *xstats,
+                         unsigned int n)
+{
+       struct ena_adapter *adapter =
+                       (struct ena_adapter *)(dev->data->dev_private);
+       unsigned int xstats_count = ena_xstats_calc_num(dev);
+       unsigned int stat, i, count = 0;
+       int stat_offset;
+       void *stats_begin;
+
+       if (n < xstats_count)
+               return xstats_count;
+
+       if (!xstats)
+               return 0;
+
+       for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
+               stat_offset = ena_stats_global_strings[stat].stat_offset;
+               stats_begin = &adapter->dev_stats;
+
+               xstats[count].id = count;
+               xstats[count].value = *((uint64_t *)
+                       ((char *)stats_begin + stat_offset));
+       }
+
+       for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
+               for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
+                       stat_offset = ena_stats_rx_strings[stat].stat_offset;
+                       stats_begin = &adapter->rx_ring[i].rx_stats;
+
+                       xstats[count].id = count;
+                       xstats[count].value = *((uint64_t *)
+                               ((char *)stats_begin + stat_offset));
+               }
+       }
+
+       for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
+               for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
+                       stat_offset = ena_stats_tx_strings[stat].stat_offset;
+                       stats_begin = &adapter->tx_ring[i].tx_stats;
+
+                       xstats[count].id = count;
+                       xstats[count].value = *((uint64_t *)
+                               ((char *)stats_begin + stat_offset));
+               }
+       }
+
+       return count;
+}
+
+static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
+                               const uint64_t *ids,
+                               uint64_t *values,
+                               unsigned int n)
+{
+       struct ena_adapter *adapter =
+               (struct ena_adapter *)(dev->data->dev_private);
+       uint64_t id;
+       uint64_t rx_entries, tx_entries;
+       unsigned int i;
+       int qid;
+       int valid = 0;
+       for (i = 0; i < n; ++i) {
+               id = ids[i];
+               /* Check if id belongs to global statistics */
+               if (id < ENA_STATS_ARRAY_GLOBAL) {
+                       values[i] = *((uint64_t *)&adapter->dev_stats + id);
+                       ++valid;
+                       continue;
+               }
+
+               /* Check if id belongs to rx queue statistics */
+               id -= ENA_STATS_ARRAY_GLOBAL;
+               rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
+               if (id < rx_entries) {
+                       qid = id % dev->data->nb_rx_queues;
+                       id /= dev->data->nb_rx_queues;
+                       values[i] = *((uint64_t *)
+                               &adapter->rx_ring[qid].rx_stats + id);
+                       ++valid;
+                       continue;
+               }
+               /* Check if id belongs to tx queue statistics */
+               id -= rx_entries;
+               tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
+               if (id < tx_entries) {
+                       qid = id % dev->data->nb_tx_queues;
+                       id /= dev->data->nb_tx_queues;
+                       values[i] = *((uint64_t *)
+                               &adapter->tx_ring[qid].tx_stats + id);
+                       ++valid;
+                       continue;
+               }
+       }
+
+       return valid;
+}
+
 /*********************************************************************
  *  PMD configuration
  *********************************************************************/
@@ -2505,8 +2686,14 @@ static void ena_keep_alive(void *adapter_data,
                           __rte_unused struct ena_admin_aenq_entry *aenq_e)
 {
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+       struct ena_admin_aenq_keep_alive_desc *desc;
+       uint64_t rx_drops;
 
        adapter->timestamp_wd = rte_get_timer_cycles();
+
+       desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
+       rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
+       rte_atomic64_set(&adapter->drv_stats->rx_drops, rx_drops);
 }
 
 /**