net/ena: remove Tx mbuf linearization
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index fe9bac8..deaee30 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -21,7 +21,7 @@
 #include <ena_eth_io_defs.h>
 
 #define DRV_MODULE_VER_MAJOR   2
-#define DRV_MODULE_VER_MINOR   4
+#define DRV_MODULE_VER_MINOR   5
 #define DRV_MODULE_VER_SUBMINOR        0
 
 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
@@ -93,12 +93,11 @@ static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(cnt),
        ENA_STAT_TX_ENTRY(bytes),
        ENA_STAT_TX_ENTRY(prepare_ctx_err),
-       ENA_STAT_TX_ENTRY(linearize),
-       ENA_STAT_TX_ENTRY(linearize_failed),
        ENA_STAT_TX_ENTRY(tx_poll),
        ENA_STAT_TX_ENTRY(doorbells),
        ENA_STAT_TX_ENTRY(bad_req_id),
        ENA_STAT_TX_ENTRY(available_desc),
+       ENA_STAT_TX_ENTRY(missed_tx),
 };
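
The linearize/linearize_failed counters disappear together with the linearization path, and a missed_tx counter is added for the new Tx completion watchdog. Per-queue Tx counters such as missed_tx are exported through the generic ethdev xstats API; a minimal sketch of reading them, which matches names by substring so it does not depend on the exact per-queue name format used by the driver:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>

/* Print every extended statistic whose name mentions "missed_tx".
 * Purely illustrative: it relies only on the generic xstats API,
 * not on the exact ENA per-queue name format.
 */
static void
print_missed_tx_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int len, i;

	len = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (len <= 0)
		return;

	names = calloc(len, sizeof(*names));
	xstats = calloc(len, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, len) != len ||
	    rte_eth_xstats_get(port_id, xstats, len) != len)
		goto out;

	for (i = 0; i < len; i++)
		if (strstr(names[i].name, "missed_tx") != NULL)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
out:
	free(names);
	free(xstats);
}
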
 
 static const struct ena_stats ena_stats_rx_strings[] = {
@@ -116,13 +115,13 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX     ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX     ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-                       DEV_TX_OFFLOAD_UDP_CKSUM |\
-                       DEV_TX_OFFLOAD_IPV4_CKSUM |\
-                       DEV_TX_OFFLOAD_TCP_TSO)
-#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
-                      PKT_TX_IP_CKSUM |\
-                      PKT_TX_TCP_SEG)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+                       RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+                       RTE_ETH_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
+                      RTE_MBUF_F_TX_IP_CKSUM |\
+                      RTE_MBUF_F_TX_TCP_SEG)
 
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON 0x1D0F
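
These renames track the DPDK 21.11 API: the PKT_TX_*/DEV_TX_OFFLOAD_* macros become RTE_MBUF_F_TX_*/RTE_ETH_TX_OFFLOAD_*, with unchanged meaning. For reference, these are the per-mbuf flags an application sets when requesting checksum offload from the PMD; a minimal sketch assuming a plain Ethernet + IPv4 + TCP frame (the header lengths must always describe the real packet):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Request IPv4 header checksum and TCP checksum offload for one mbuf.
 * l2_len/l3_len are assumed here to be a plain Ethernet + IPv4 header;
 * adjust them to the actual frame layout.
 */
static void
request_tx_csum_offload(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
}

/* For TSO, additionally set RTE_MBUF_F_TX_TCP_SEG, l4_len and tso_segsz and
 * make sure RTE_ETH_TX_OFFLOAD_TCP_TSO was enabled on the queue. Depending on
 * the device, rte_eth_tx_prepare() may still have to run to fill in the
 * pseudo-header checksum; see eth_ena_prep_pkts() further down.
 */
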
@@ -130,15 +129,14 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define PCI_DEVICE_ID_ENA_VF           0xEC20
 #define PCI_DEVICE_ID_ENA_VF_RSERV0    0xEC21
 
-#define        ENA_TX_OFFLOAD_MASK     (\
-       PKT_TX_L4_MASK |         \
-       PKT_TX_IPV6 |            \
-       PKT_TX_IPV4 |            \
-       PKT_TX_IP_CKSUM |        \
-       PKT_TX_TCP_SEG)
+#define        ENA_TX_OFFLOAD_MASK     (RTE_MBUF_F_TX_L4_MASK |         \
+       RTE_MBUF_F_TX_IPV6 |            \
+       RTE_MBUF_F_TX_IPV4 |            \
+       RTE_MBUF_F_TX_IP_CKSUM |        \
+       RTE_MBUF_F_TX_TCP_SEG)
 
 #define        ENA_TX_OFFLOAD_NOTSUP_MASK      \
-       (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+       (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
 
 /** HW specific offloads capabilities. */
 /* IPv4 checksum offload. */
@@ -223,6 +221,10 @@ static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
 static int ena_queue_start_all(struct rte_eth_dev *dev,
                               enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
 static int ena_infos_get(struct rte_eth_dev *dev,
                         struct rte_eth_dev_info *dev_info);
 static void ena_interrupt_handler_rte(void *cb_arg);
@@ -291,24 +293,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
        if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
                packet_type |= RTE_PTYPE_L3_IPV4;
                if (unlikely(ena_rx_ctx->l3_csum_err))
-                       ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                       ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                else
-                       ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
        } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
                packet_type |= RTE_PTYPE_L3_IPV6;
        }
 
        if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
-               ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
        else
                if (unlikely(ena_rx_ctx->l4_csum_err))
-                       ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                else
-                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 
        if (fill_hash &&
            likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
-               ol_flags |= PKT_RX_RSS_HASH;
+               ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                mbuf->hash.rss = ena_rx_ctx->hash;
        }
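
On the Rx side the renamed flags are the ones an application reads back to learn the checksum validation result after rte_eth_rx_burst(); a minimal sketch of consuming them:

#include <rte_mbuf.h>

/* Return non-zero if the NIC reported a bad IP or L4 checksum for this mbuf.
 * RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN (e.g. for fragments) is treated as "not
 * verified", so the caller may still want a software check in that case.
 */
static int
rx_csum_is_bad(const struct rte_mbuf *m)
{
	if ((m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
	    RTE_MBUF_F_RX_IP_CKSUM_BAD)
		return 1;
	if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
	    RTE_MBUF_F_RX_L4_CKSUM_BAD)
		return 1;
	return 0;
}
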
 
@@ -326,19 +328,19 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
        if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
            (queue_offloads & QUEUE_OFFLOADS)) {
                /* check if TSO is required */
-               if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-                   (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+               if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+                   (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
                        ena_tx_ctx->tso_enable = true;
 
                        ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
                }
 
                /* check if L3 checksum is needed */
-               if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-                   (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+               if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
+                   (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
                        ena_tx_ctx->l3_csum_enable = true;
 
-               if (mbuf->ol_flags & PKT_TX_IPV6) {
+               if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
                        ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
                } else {
                        ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
@@ -351,13 +353,13 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
                }
 
                /* check if L4 checksum is needed */
-               if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-                   (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+               if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
+                   (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
                        ena_tx_ctx->l4_csum_enable = true;
-               } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
-                               PKT_TX_UDP_CKSUM) &&
-                               (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+               } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+                               RTE_MBUF_F_TX_UDP_CKSUM) &&
+                               (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
                        ena_tx_ctx->l4_csum_enable = true;
                } else {
@@ -490,7 +492,7 @@ err:
 static int ena_close(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ena_adapter *adapter = dev->data->dev_private;
        int ret = 0;
 
@@ -563,16 +565,13 @@ static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
        struct ena_ring *ring = dev->data->rx_queues[qid];
 
        /* Free ring resources */
-       if (ring->rx_buffer_info)
-               rte_free(ring->rx_buffer_info);
+       rte_free(ring->rx_buffer_info);
        ring->rx_buffer_info = NULL;
 
-       if (ring->rx_refill_buffer)
-               rte_free(ring->rx_refill_buffer);
+       rte_free(ring->rx_refill_buffer);
        ring->rx_refill_buffer = NULL;
 
-       if (ring->empty_rx_reqs)
-               rte_free(ring->empty_rx_reqs);
+       rte_free(ring->empty_rx_reqs);
        ring->empty_rx_reqs = NULL;
 
        ring->configured = 0;
@@ -586,14 +585,11 @@ static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
        struct ena_ring *ring = dev->data->tx_queues[qid];
 
        /* Free ring resources */
-       if (ring->push_buf_intermediate_buf)
-               rte_free(ring->push_buf_intermediate_buf);
+       rte_free(ring->push_buf_intermediate_buf);
 
-       if (ring->tx_buffer_info)
-               rte_free(ring->tx_buffer_info);
+       rte_free(ring->tx_buffer_info);
 
-       if (ring->empty_tx_reqs)
-               rte_free(ring->empty_tx_reqs);
+       rte_free(ring->empty_tx_reqs);
 
        ring->empty_tx_reqs = NULL;
        ring->tx_buffer_info = NULL;
@@ -638,9 +634,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
        struct rte_eth_link *link = &dev->data->dev_link;
        struct ena_adapter *adapter = dev->data->dev_private;
 
-       link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-       link->link_speed = ETH_SPEED_NUM_NONE;
-       link->link_duplex = ETH_LINK_FULL_DUPLEX;
+       link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+       link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+       link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        return 0;
 }
@@ -918,7 +914,7 @@ static int ena_start(struct rte_eth_dev *dev)
        if (rc)
                goto err_start_tx;
 
-       if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+       if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
                rc = ena_rss_configure(adapter);
                if (rc)
                        goto err_rss_init;
@@ -950,7 +946,7 @@ static int ena_stop(struct rte_eth_dev *dev)
        struct ena_adapter *adapter = dev->data->dev_private;
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int rc;
 
        /* Cannot free memory in secondary process */
@@ -972,10 +968,9 @@ static int ena_stop(struct rte_eth_dev *dev)
        rte_intr_disable(intr_handle);
 
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec != NULL) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+
+       /* Cleanup vector list */
+       rte_intr_vec_list_free(intr_handle);
 
        rte_intr_enable(intr_handle);
 
@@ -991,7 +986,7 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
        struct ena_adapter *adapter = ring->adapter;
        struct ena_com_dev *ena_dev = &adapter->ena_dev;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct ena_com_create_io_ctx ctx =
                /* policy set to _HOST just to satisfy icc compiler */
                { ENA_ADMIN_PLACEMENT_POLICY_HOST,
@@ -1011,7 +1006,10 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
                ena_qid = ENA_IO_RXQ_IDX(ring->id);
                ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
                if (rte_intr_dp_is_en(intr_handle))
-                       ctx.msix_vector = intr_handle->intr_vec[ring->id];
+                       ctx.msix_vector =
+                               rte_intr_vec_list_index_get(intr_handle,
+                                                                  ring->id);
+
                for (i = 0; i < ring->ring_size; i++)
                        ring->empty_rx_reqs[i] = i;
        }
@@ -1160,20 +1158,22 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        txq->size_mask = nb_desc - 1;
        txq->numa_socket_id = socket_id;
        txq->pkts_without_db = false;
+       txq->last_cleanup_ticks = 0;
 
-       txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
-                                         sizeof(struct ena_tx_buffer) *
-                                         txq->ring_size,
-                                         RTE_CACHE_LINE_SIZE);
+       txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
+               sizeof(struct ena_tx_buffer) * txq->ring_size,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!txq->tx_buffer_info) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for Tx buffer info\n");
                return -ENOMEM;
        }
 
-       txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
-                                        sizeof(u16) * txq->ring_size,
-                                        RTE_CACHE_LINE_SIZE);
+       txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
+               sizeof(uint16_t) * txq->ring_size,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!txq->empty_tx_reqs) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for empty Tx requests\n");
@@ -1182,9 +1182,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        txq->push_buf_intermediate_buf =
-               rte_zmalloc("txq->push_buf_intermediate_buf",
-                           txq->tx_max_header_size,
-                           RTE_CACHE_LINE_SIZE);
+               rte_zmalloc_socket("txq->push_buf_intermediate_buf",
+                       txq->tx_max_header_size,
+                       RTE_CACHE_LINE_SIZE,
+                       socket_id);
        if (!txq->push_buf_intermediate_buf) {
                PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
                rte_free(txq->tx_buffer_info);
@@ -1207,6 +1208,9 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                        txq->ring_size - ENA_REFILL_THRESH_PACKET);
        }
 
+       txq->missing_tx_completion_threshold =
+               RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
+
        /* Store pointer to this queue in upper layer */
        txq->configured = 1;
        dev->data->tx_queues[queue_idx] = txq;
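
The queue bookkeeping arrays are now allocated with rte_zmalloc_socket() instead of rte_zmalloc(), so they land on the NUMA node given by the socket_id argument of the queue setup callback rather than on the calling core's node. The same pattern in isolation, with a hypothetical element type and name:

#include <stdint.h>

#include <rte_common.h>
#include <rte_malloc.h>

/* Allocate a per-queue array on the queue's own NUMA node. "nb_desc" and
 * "socket_id" mirror the arguments of the queue setup callback; the element
 * type stands in for the driver's bookkeeping structure.
 */
static uint16_t *
alloc_queue_array(uint16_t nb_desc, int socket_id)
{
	uint16_t *arr;

	arr = rte_zmalloc_socket("queue_array",
				 sizeof(*arr) * nb_desc,
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (arr == NULL)
		return NULL; /* caller reports -ENOMEM */

	return arr;
}
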
@@ -1266,19 +1270,20 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->numa_socket_id = socket_id;
        rxq->mb_pool = mp;
 
-       rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
+       rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
                sizeof(struct ena_rx_buffer) * nb_desc,
-               RTE_CACHE_LINE_SIZE);
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!rxq->rx_buffer_info) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for Rx buffer info\n");
                return -ENOMEM;
        }
 
-       rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
-                                           sizeof(struct rte_mbuf *) * nb_desc,
-                                           RTE_CACHE_LINE_SIZE);
-
+       rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
+               sizeof(struct rte_mbuf *) * nb_desc,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!rxq->rx_refill_buffer) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for Rx refill buffer\n");
@@ -1287,9 +1292,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
-       rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
-                                        sizeof(uint16_t) * nb_desc,
-                                        RTE_CACHE_LINE_SIZE);
+       rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
+               sizeof(uint16_t) * nb_desc,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!rxq->empty_rx_reqs) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for empty Rx requests\n");
@@ -1394,7 +1400,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
                ++rxq->rx_stats.refill_partial;
        }
 
-       /* When we submitted free recources to device... */
+       /* When we submitted free resources to device... */
        if (likely(i > 0)) {
                /* ...let HW know that it can fill buffers with data. */
                ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -1531,6 +1537,87 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
        }
 }
 
+static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
+                                           struct ena_ring *tx_ring)
+{
+       struct ena_tx_buffer *tx_buf;
+       uint64_t timestamp;
+       uint64_t completion_delay;
+       uint32_t missed_tx = 0;
+       unsigned int i;
+       int rc = 0;
+
+       for (i = 0; i < tx_ring->ring_size; ++i) {
+               tx_buf = &tx_ring->tx_buffer_info[i];
+               timestamp = tx_buf->timestamp;
+
+               if (timestamp == 0)
+                       continue;
+
+               completion_delay = rte_get_timer_cycles() - timestamp;
+               if (completion_delay > adapter->missing_tx_completion_to) {
+                       if (unlikely(!tx_buf->print_once)) {
+                               PMD_TX_LOG(WARNING,
+                                       "Found a Tx that wasn't completed on time, qid %d, index %d. "
+                                       "Missing Tx outstanding for %" PRIu64 " msecs.\n",
+                                       tx_ring->id, i, completion_delay /
+                                       rte_get_timer_hz() * 1000);
+                               tx_buf->print_once = true;
+                       }
+                       ++missed_tx;
+               }
+       }
+
+       if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
+               PMD_DRV_LOG(ERR,
+                       "The number of lost Tx completions is above the threshold (%d > %d). "
+                       "Trigger the device reset.\n",
+                       missed_tx,
+                       tx_ring->missing_tx_completion_threshold);
+               adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
+               adapter->trigger_reset = true;
+               rc = -EIO;
+       }
+
+       tx_ring->tx_stats.missed_tx += missed_tx;
+
+       return rc;
+}
+
+static void check_for_tx_completions(struct ena_adapter *adapter)
+{
+       struct ena_ring *tx_ring;
+       uint64_t tx_cleanup_delay;
+       size_t qid;
+       int budget;
+       uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues;
+
+       if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+               return;
+
+       nb_tx_queues = adapter->edev_data->nb_tx_queues;
+       budget = adapter->missing_tx_completion_budget;
+
+       qid = adapter->last_tx_comp_qid;
+       while (budget-- > 0) {
+               tx_ring = &adapter->tx_ring[qid];
+
+               /* Tx cleanup is called only from the burst function, so it is
+                * the application that decides how often it runs, and each run
+                * is limited by the cleanup threshold. To avoid false
+                * detection of a missing HW Tx completion, check the delay
+                * since the cleanup function was last called for this queue.
+                */
+               tx_cleanup_delay = rte_get_timer_cycles() -
+                       tx_ring->last_cleanup_ticks;
+               if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
+                       check_for_tx_completion_in_queue(adapter, tx_ring);
+               qid = (qid + 1) % nb_tx_queues;
+       }
+
+       adapter->last_tx_comp_qid = qid;
+}
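
The watchdog added above works on per-buffer timestamps: ena_xmit_mbuf() records rte_get_timer_cycles() for every submitted mbuf, ena_tx_cleanup() clears it, and a non-zero timestamp older than missing_tx_completion_to counts as a missed completion. The same cycle arithmetic in a standalone form, as a rough sketch:

#include <stdbool.h>
#include <stdint.h>

#include <rte_cycles.h>

/* Return true when "timestamp" (a value previously taken with
 * rte_get_timer_cycles()) is older than "timeout_ms" milliseconds.
 * A zero timestamp means "no request in flight" and never expires.
 */
static bool
request_timed_out(uint64_t timestamp, uint64_t timeout_ms)
{
	uint64_t timeout_cycles;

	if (timestamp == 0)
		return false;

	/* Convert the millisecond timeout to timer cycles. */
	timeout_cycles = rte_get_timer_hz() * timeout_ms / 1000;

	return rte_get_timer_cycles() - timestamp > timeout_cycles;
}
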
+
 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
                                  void *arg)
 {
@@ -1539,6 +1626,7 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 
        check_for_missing_keep_alive(adapter);
        check_for_admin_com_state(adapter);
+       check_for_tx_completions(adapter);
 
        if (unlikely(adapter->trigger_reset)) {
                PMD_DRV_LOG(ERR, "Trigger reset is on\n");
@@ -1730,7 +1818,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
                     pci_dev->addr.devid,
                     pci_dev->addr.function);
 
-       intr_handle = &pci_dev->intr_handle;
+       intr_handle = pci_dev->intr_handle;
 
        adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
        adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
@@ -1909,12 +1997,32 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
        adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-       dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+       dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+       /* Scattered Rx cannot be turned off in the HW, so this capability must
+        * be forced.
+        */
+       dev->data->scattered_rx = 1;
+
+       adapter->last_tx_comp_qid = 0;
+
+       adapter->missing_tx_completion_budget =
+               RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
+
+       adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
+       /* To avoid reporting a spurious Tx completion timeout just because
+        * the application has not called the Tx cleanup function recently,
+        * set the per-queue cleanup stall delay to half of the missing
+        * completion timeout as a safety margin. If Tx completions really are
+        * missing in the queue, they will still be detected sooner or later.
+        */
+       adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
 
        adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
        adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+
        return 0;
 }
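
ena_dev_configure() now also forces scattered Rx, keeps forcing multi-segment Tx, and initializes the missing-Tx-completion bookkeeping. On the application side this is still the ordinary rte_eth_dev_configure() call; a minimal sketch that enables RSS so the RTE_ETH_MQ_RX_RSS_FLAG branch in ena_start() is taken (the field values are illustrative and should be trimmed against what rte_eth_dev_info_get() reports):

#include <rte_ethdev.h>

/* Configure "port_id" with RSS distribution over nb_rxq/nb_txq queues. */
static int
configure_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
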
 
@@ -1947,12 +2055,65 @@ static void ena_init_rings(struct ena_adapter *adapter,
        }
 }
 
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
+{
+       uint64_t port_offloads = 0;
+
+       if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
+               port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
+
+       if (adapter->offloads.rx_offloads &
+           (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
+               port_offloads |=
+                       RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
+
+       if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
+               port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+       port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+
+       return port_offloads;
+}
+
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
+{
+       uint64_t port_offloads = 0;
+
+       if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
+               port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+
+       if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
+               port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+       if (adapter->offloads.tx_offloads &
+           (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
+            ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
+               port_offloads |=
+                       RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
+
+       port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+       return port_offloads;
+}
+
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
+{
+       RTE_SET_USED(adapter);
+
+       return 0;
+}
+
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
+{
+       RTE_SET_USED(adapter);
+
+       return 0;
+}
+
 static int ena_infos_get(struct rte_eth_dev *dev,
                          struct rte_eth_dev_info *dev_info)
 {
        struct ena_adapter *adapter;
        struct ena_com_dev *ena_dev;
-       uint64_t rx_feat = 0, tx_feat = 0;
 
        ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
        ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
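
The capability reporting is factored out into the four ena_get_*_offloads() helpers above, and ena_infos_get() below simply publishes their results. Applications read them back through rte_eth_dev_info_get(); a short sketch checking one Tx capability bit:

#include <rte_ethdev.h>

/* Return 1 when the port advertises Tx TCP checksum offload, 0 when it does
 * not, and a negative errno if the query fails.
 */
static int
port_has_tx_tcp_cksum(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	return (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? 1 : 0;
}
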
@@ -1962,41 +2123,20 @@ static int ena_infos_get(struct rte_eth_dev *dev,
        ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
        dev_info->speed_capa =
-                       ETH_LINK_SPEED_1G   |
-                       ETH_LINK_SPEED_2_5G |
-                       ETH_LINK_SPEED_5G   |
-                       ETH_LINK_SPEED_10G  |
-                       ETH_LINK_SPEED_25G  |
-                       ETH_LINK_SPEED_40G  |
-                       ETH_LINK_SPEED_50G  |
-                       ETH_LINK_SPEED_100G;
-
-       /* Set Tx & Rx features available for device */
-       if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-               tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
-
-       if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-               tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-       if (adapter->offloads.tx_offloads &
-           (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
-            ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
-               tx_feat |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
-
-       if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-               rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM;
-       if (adapter->offloads.rx_offloads &
-           (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
-               rx_feat |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
-
-       tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
+                       RTE_ETH_LINK_SPEED_1G   |
+                       RTE_ETH_LINK_SPEED_2_5G |
+                       RTE_ETH_LINK_SPEED_5G   |
+                       RTE_ETH_LINK_SPEED_10G  |
+                       RTE_ETH_LINK_SPEED_25G  |
+                       RTE_ETH_LINK_SPEED_40G  |
+                       RTE_ETH_LINK_SPEED_50G  |
+                       RTE_ETH_LINK_SPEED_100G;
 
        /* Inform framework about available features */
-       dev_info->rx_offload_capa = rx_feat;
-       if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
-       dev_info->rx_queue_offload_capa = rx_feat;
-       dev_info->tx_offload_capa = tx_feat;
-       dev_info->tx_queue_offload_capa = tx_feat;
+       dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
+       dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
+       dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
+       dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
 
        dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
        dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
@@ -2012,9 +2152,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
        dev_info->max_tx_queues = adapter->max_num_io_queues;
        dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
-       adapter->tx_supported_offloads = tx_feat;
-       adapter->rx_supported_offloads = rx_feat;
-
        dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
        dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
@@ -2159,7 +2296,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        }
 #endif
 
-       fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+       fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        descs_in_use = rx_ring->ring_size -
                ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2210,7 +2347,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
 
                if (unlikely(mbuf->ol_flags &
-                               (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
+                               (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
                        rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
                        ++rx_ring->rx_stats.bad_csum;
                }
@@ -2258,10 +2395,10 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                if (ol_flags == 0)
                        continue;
 
-               l4_csum_flag = ol_flags & PKT_TX_L4_MASK;
+               l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
                /* SCTP checksum offload is not supported by the ENA. */
                if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
-                   l4_csum_flag == PKT_TX_SCTP_CKSUM) {
+                   l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
                        PMD_TX_LOG(DEBUG,
                                "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
                                i, ol_flags);
@@ -2269,14 +2406,25 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        return i;
                }
 
+               if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
+                   !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+                     m->nb_segs == tx_ring->sgl_size &&
+                     m->data_len < tx_ring->tx_max_header_size))) {
+                       PMD_TX_LOG(DEBUG,
+                               "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
+                               i, m->nb_segs);
+                       rte_errno = EINVAL;
+                       return i;
+               }
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                /* Check if requested offload is also enabled for the queue */
-               if ((ol_flags & PKT_TX_IP_CKSUM &&
-                    !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
-                   (l4_csum_flag == PKT_TX_TCP_CKSUM &&
-                    !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
-                   (l4_csum_flag == PKT_TX_UDP_CKSUM &&
-                    !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+               if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
+                    !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
+                   (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
+                    !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
+                   (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
+                    !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
                        PMD_TX_LOG(DEBUG,
                                "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
                                i, m->nb_segs, tx_ring->id);
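
With linearization removed from the data path, eth_ena_prep_pkts() is where over-segmented mbufs get rejected (unless the LLQ header-fits exception holds). The intended usage is to run rte_eth_tx_prepare() before rte_eth_tx_burst() and handle the packets it refuses; a hedged sketch that simply drops them:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Validate offload flags and segment counts before transmitting. Packets
 * rejected by the PMD (rte_errno set to EINVAL/ENOTSUP) are dropped here;
 * a real application might instead fix them up and retry.
 */
static uint16_t
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts)
		rte_pktmbuf_free_bulk(&pkts[nb_prep], nb_pkts - nb_prep);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
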
@@ -2287,7 +2435,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                /* The caller is obligated to set l2 and l3 len if any cksum
                 * offload is enabled.
                 */
-               if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) &&
+               if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
                    (m->l2_len == 0 || m->l3_len == 0))) {
                        PMD_TX_LOG(DEBUG,
                                "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
@@ -2306,14 +2454,14 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * pseudo header checksum is needed.
                 */
                need_pseudo_csum = false;
-               if (ol_flags & PKT_TX_IPV4) {
-                       if (ol_flags & PKT_TX_IP_CKSUM &&
+               if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+                       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
                            !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
                                rte_errno = ENOTSUP;
                                return i;
                        }
 
-                       if (ol_flags & PKT_TX_TCP_SEG &&
+                       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
                            !(dev_offload_capa & ENA_IPV4_TSO)) {
                                rte_errno = ENOTSUP;
                                return i;
@@ -2322,7 +2470,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /* Check HW capabilities and if pseudo csum is needed
                         * for L4 offloads.
                         */
-                       if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+                       if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
                            !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
                                if (dev_offload_capa &
                                    ENA_L4_IPV4_CSUM_PARTIAL) {
@@ -2339,22 +2487,22 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
                        if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
                                m->packet_type |= RTE_PTYPE_L4_NONFRAG;
-                       } else if (ol_flags & PKT_TX_TCP_SEG) {
+                       } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                                /* In case we are supposed to TSO and have DF
                                 * not set (DF=0) hardware must be provided with
                                 * partial checksum.
                                 */
                                need_pseudo_csum = true;
                        }
-               } else if (ol_flags & PKT_TX_IPV6) {
+               } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
                        /* There is no support for IPv6 TSO as for now. */
-                       if (ol_flags & PKT_TX_TCP_SEG) {
+                       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                                rte_errno = ENOTSUP;
                                return i;
                        }
 
                        /* Check HW capabilities and if pseudo csum is needed */
-                       if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+                       if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
                            !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
                                if (dev_offload_capa &
                                    ENA_L4_IPV6_CSUM_PARTIAL) {
@@ -2390,6 +2538,20 @@ static void ena_update_hints(struct ena_adapter *adapter,
                adapter->ena_dev.mmio_read.reg_read_to =
                        hints->mmio_read_timeout * 1000;
 
+       if (hints->missing_tx_completion_timeout) {
+               if (hints->missing_tx_completion_timeout ==
+                   ENA_HW_HINTS_NO_TIMEOUT) {
+                       adapter->missing_tx_completion_to =
+                               ENA_HW_HINTS_NO_TIMEOUT;
+               } else {
+                       /* Convert from msecs to ticks */
+                       adapter->missing_tx_completion_to = rte_get_timer_hz() *
+                               hints->missing_tx_completion_timeout / 1000;
+                       adapter->tx_cleanup_stall_delay =
+                               adapter->missing_tx_completion_to / 2;
+               }
+       }
+
        if (hints->driver_watchdog_timeout) {
                if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
                        adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
@@ -2401,56 +2563,6 @@ static void ena_update_hints(struct ena_adapter *adapter,
        }
 }
 
-static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
-                                             struct rte_mbuf *mbuf)
-{
-       struct ena_com_dev *ena_dev;
-       int num_segments, header_len, rc;
-
-       ena_dev = &tx_ring->adapter->ena_dev;
-       num_segments = mbuf->nb_segs;
-       header_len = mbuf->data_len;
-
-       if (likely(num_segments < tx_ring->sgl_size))
-               goto checkspace;
-
-       if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
-           (num_segments == tx_ring->sgl_size) &&
-           (header_len < tx_ring->tx_max_header_size))
-               goto checkspace;
-
-       /* Checking for space for 2 additional metadata descriptors due to
-        * possible header split and metadata descriptor. Linearization will
-        * be needed so we reduce the segments number from num_segments to 1
-        */
-       if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
-               PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
-               return ENA_COM_NO_MEM;
-       }
-       ++tx_ring->tx_stats.linearize;
-       rc = rte_pktmbuf_linearize(mbuf);
-       if (unlikely(rc)) {
-               PMD_TX_LOG(WARNING, "Mbuf linearize failed\n");
-               rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
-               ++tx_ring->tx_stats.linearize_failed;
-               return rc;
-       }
-
-       return 0;
-
-checkspace:
-       /* Checking for space for 2 additional metadata descriptors due to
-        * possible header split and metadata descriptor
-        */
-       if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
-                                         num_segments + 2)) {
-               PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
-               return ENA_COM_NO_MEM;
-       }
-
-       return 0;
-}
-
 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
        struct ena_tx_buffer *tx_info,
        struct rte_mbuf *mbuf,
@@ -2535,9 +2647,14 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
        int nb_hw_desc;
        int rc;
 
-       rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf);
-       if (unlikely(rc))
-               return rc;
+       /* Checking for space for 2 additional metadata descriptors due to
+        * possible header split and metadata descriptor
+        */
+       if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                                         mbuf->nb_segs + 2)) {
+               PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
+               return ENA_COM_NO_MEM;
+       }
 
        next_to_use = tx_ring->next_to_use;
 
@@ -2580,6 +2697,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
        }
 
        tx_info->tx_descs = nb_hw_desc;
+       tx_info->timestamp = rte_get_timer_cycles();
 
        tx_ring->tx_stats.cnt++;
        tx_ring->tx_stats.bytes += mbuf->pkt_len;
@@ -2612,6 +2730,7 @@ static void ena_tx_cleanup(struct ena_ring *tx_ring)
 
                /* Get Tx info & store how many descs were processed  */
                tx_info = &tx_ring->tx_buffer_info[req_id];
+               tx_info->timestamp = 0;
 
                mbuf = tx_info->mbuf;
                rte_pktmbuf_free(mbuf);
@@ -2632,6 +2751,9 @@ static void ena_tx_cleanup(struct ena_ring *tx_ring)
                ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
                ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
        }
+
+       /* Notify completion handler that the cleanup was just called */
+       tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
 }
 
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -2950,7 +3072,7 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
 static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int rc;
        uint16_t vectors_nb, i;
        bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
@@ -2977,9 +3099,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
                goto enable_intr;
        }
 
-       intr_handle->intr_vec = rte_zmalloc("intr_vec",
-               dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0);
-       if (intr_handle->intr_vec == NULL) {
+       /* Allocate the vector list */
+       if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                          dev->data->nb_rx_queues)) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate interrupt vector for %d queues\n",
                        dev->data->nb_rx_queues);
@@ -2998,7 +3120,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
        }
 
        for (i = 0; i < vectors_nb; ++i)
-               intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+               if (rte_intr_vec_list_index_set(intr_handle, i,
+                                          RTE_INTR_VEC_RXTX_OFFSET + i))
+                       goto disable_intr_efd;
 
        rte_intr_enable(intr_handle);
        return 0;
@@ -3006,8 +3130,7 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 disable_intr_efd:
        rte_intr_efd_disable(intr_handle);
 free_intr_vec:
-       rte_free(intr_handle->intr_vec);
-       intr_handle->intr_vec = NULL;
+       rte_intr_vec_list_free(intr_handle);
 enable_intr:
        rte_intr_enable(intr_handle);
        return rc;
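
pci_dev->intr_handle is now a pointer to an opaque handle, so the Rx interrupt vector list is managed through the rte_intr_vec_list_*() accessors (the driver-facing side of the 21.11 interrupt API) instead of poking intr_handle->intr_vec directly. The same alloc/set/free sequence in isolation, mirroring ena_setup_rx_intr(); the handle itself is assumed to come from the bus layer or rte_intr_instance_alloc():

#include <rte_interrupts.h>

/* Build a one-vector-per-queue mapping on an interrupt handle.
 * Returns 0 on success, -1 on failure.
 */
static int
setup_queue_vectors(struct rte_intr_handle *intr_handle, uint16_t nb_queues)
{
	uint16_t i;

	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_queues) != 0)
		return -1;

	for (i = 0; i < nb_queues; i++) {
		if (rte_intr_vec_list_index_set(intr_handle, i,
				RTE_INTR_VEC_RXTX_OFFSET + i) != 0) {
			rte_intr_vec_list_free(intr_handle);
			return -1;
		}
	}

	return 0;
}
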