diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index a82d4b6..f3b17d7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -21,7 +21,7 @@
 #include <ena_eth_io_defs.h>
 
 #define DRV_MODULE_VER_MAJOR   2
-#define DRV_MODULE_VER_MINOR   4
+#define DRV_MODULE_VER_MINOR   5
 #define DRV_MODULE_VER_SUBMINOR        0
 
 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
@@ -99,6 +99,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(doorbells),
        ENA_STAT_TX_ENTRY(bad_req_id),
        ENA_STAT_TX_ENTRY(available_desc),
+       ENA_STAT_TX_ENTRY(missed_tx),
 };
 
 static const struct ena_stats ena_stats_rx_strings[] = {
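
The new missed_tx entry is exposed to applications through the standard
xstats API, next to the other per-queue Tx counters. A minimal consumer
sketch follows; the per-queue xstat name format (e.g. "tx_q0_missed_tx") is
an assumption about how the PMD composes the names, while the API calls
themselves are plain ethdev:

    /* Sketch: dump every xstat whose name contains "missed_tx". */
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static void print_missed_tx(uint16_t port_id)
    {
        int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (nb <= 0)
            return;

        struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
        struct rte_eth_xstat *vals = calloc(nb, sizeof(*vals));
        if (names == NULL || vals == NULL)
            goto out;

        if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
            rte_eth_xstats_get(port_id, vals, nb) != nb)
            goto out;

        for (int i = 0; i < nb; i++)
            if (strstr(names[vals[i].id].name, "missed_tx") != NULL)
                printf("%s = %" PRIu64 "\n",
                       names[vals[i].id].name, vals[i].value);
    out:
        free(names);
        free(vals);
    }
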
@@ -140,6 +141,23 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define        ENA_TX_OFFLOAD_NOTSUP_MASK      \
        (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
 
+/** HW specific offloads capabilities. */
+/* IPv4 checksum offload. */
+#define ENA_L3_IPV4_CSUM               0x0001
+/* TCP/UDP checksum offload for IPv4 packets. */
+#define ENA_L4_IPV4_CSUM               0x0002
+/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
+#define ENA_L4_IPV4_CSUM_PARTIAL       0x0004
+/* TCP/UDP checksum offload for IPv6 packets. */
+#define ENA_L4_IPV6_CSUM               0x0008
+/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
+#define ENA_L4_IPV6_CSUM_PARTIAL       0x0010
+/* TSO support for IPv4 packets. */
+#define ENA_IPV4_TSO                   0x0020
+
+/* Device supports setting RSS hash. */
+#define ENA_RX_RSS_HASH                        0x0040
+
 static const struct rte_pci_id pci_id_ena_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
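
The seven new ENA_* capability bits are disjoint powers of two, so the Tx
and Rx capabilities each collapse into a single bitmap that the
ena_get_*_offloads() helpers further down can test with plain masking. A
compile-anywhere sketch of that pattern follows; the struct layout mirrors
what the ena_set_offloads() hunk below implies and is not copied from the
driver headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the hunk above. */
    #define ENA_L3_IPV4_CSUM               0x0001
    #define ENA_L4_IPV4_CSUM               0x0002
    #define ENA_L4_IPV4_CSUM_PARTIAL       0x0004
    #define ENA_L4_IPV6_CSUM               0x0008
    #define ENA_L4_IPV6_CSUM_PARTIAL       0x0010
    #define ENA_IPV4_TSO                   0x0020
    #define ENA_RX_RSS_HASH                0x0040

    /* Assumed shape of the per-adapter capability storage. */
    struct offloads_sketch {
        uint32_t tx_offloads;
        uint32_t rx_offloads;
    };

    int main(void)
    {
        struct offloads_sketch o = {
            .tx_offloads = ENA_L3_IPV4_CSUM | ENA_L4_IPV4_CSUM_PARTIAL,
            .rx_offloads = ENA_L4_IPV4_CSUM | ENA_RX_RSS_HASH,
        };

        /* Full or partial L4 csum both end up advertising TCP/UDP csum. */
        if (o.tx_offloads & (ENA_L4_IPV4_CSUM | ENA_L4_IPV4_CSUM_PARTIAL))
            printf("Tx L4 checksum possible for IPv4\n");
        if (!(o.tx_offloads & ENA_IPV4_TSO))
            printf("TSO not supported\n");
        return 0;
    }
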
@@ -206,6 +224,10 @@ static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
 static int ena_queue_start_all(struct rte_eth_dev *dev,
                               enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
 static int ena_infos_get(struct rte_eth_dev *dev,
                         struct rte_eth_dev_info *dev_info);
 static void ena_interrupt_handler_rte(void *cb_arg);
@@ -677,26 +699,14 @@ err:
        return rc;
 }
 
-static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
-{
-       uint32_t max_frame_len = adapter->max_mtu;
-
-       if (adapter->edev_data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_JUMBO_FRAME)
-               max_frame_len =
-                       adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;
-
-       return max_frame_len;
-}
-
 static int ena_check_valid_conf(struct ena_adapter *adapter)
 {
-       uint32_t max_frame_len = ena_get_mtu_conf(adapter);
+       uint32_t mtu = adapter->edev_data->mtu;
 
-       if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+       if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
                PMD_INIT_LOG(ERR,
                        "Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
-                       max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+                       mtu, adapter->max_mtu, ENA_MIN_MTU);
                return ENA_COM_UNSUPPORTED;
        }
 
@@ -869,10 +879,10 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        ena_dev = &adapter->ena_dev;
        ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
-       if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
+       if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
                PMD_DRV_LOG(ERR,
                        "Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
-                       mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+                       mtu, adapter->max_mtu, ENA_MIN_MTU);
                return -EINVAL;
        }
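
With ena_get_mtu_conf() gone, both the configuration check and ena_mtu_set()
validate the MTU directly against [ENA_MIN_MTU, adapter->max_mtu]. The same
bounds are visible to applications via dev_info (filled in by the
ena_infos_get() hunk further down), so a caller can pre-check before the PMD
does; a hedged sketch using only standard ethdev calls:

    #include <errno.h>
    #include <rte_ethdev.h>

    static int set_mtu_if_supported(uint16_t port_id, uint16_t mtu)
    {
        struct rte_eth_dev_info dev_info;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
            return ret;

        /* min_mtu/max_mtu mirror ENA_MIN_MTU and adapter->max_mtu. */
        if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
            return -EINVAL;

        return rte_eth_dev_set_mtu(port_id, mtu); /* reaches ena_mtu_set() */
    }
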
 
@@ -1123,6 +1133,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        struct ena_ring *txq = NULL;
        struct ena_adapter *adapter = dev->data->dev_private;
        unsigned int i;
+       uint16_t dyn_thresh;
 
        txq = &adapter->tx_ring[queue_idx];
 
@@ -1154,20 +1165,22 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        txq->size_mask = nb_desc - 1;
        txq->numa_socket_id = socket_id;
        txq->pkts_without_db = false;
+       txq->last_cleanup_ticks = 0;
 
-       txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
-                                         sizeof(struct ena_tx_buffer) *
-                                         txq->ring_size,
-                                         RTE_CACHE_LINE_SIZE);
+       txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
+               sizeof(struct ena_tx_buffer) * txq->ring_size,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!txq->tx_buffer_info) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for Tx buffer info\n");
                return -ENOMEM;
        }
 
-       txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
-                                        sizeof(u16) * txq->ring_size,
-                                        RTE_CACHE_LINE_SIZE);
+       txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
+               sizeof(uint16_t) * txq->ring_size,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!txq->empty_tx_reqs) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for empty Tx requests\n");
@@ -1176,9 +1189,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        txq->push_buf_intermediate_buf =
-               rte_zmalloc("txq->push_buf_intermediate_buf",
-                           txq->tx_max_header_size,
-                           RTE_CACHE_LINE_SIZE);
+               rte_zmalloc_socket("txq->push_buf_intermediate_buf",
+                       txq->tx_max_header_size,
+                       RTE_CACHE_LINE_SIZE,
+                       socket_id);
        if (!txq->push_buf_intermediate_buf) {
                PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
                rte_free(txq->tx_buffer_info);
@@ -1189,10 +1203,21 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        for (i = 0; i < txq->ring_size; i++)
                txq->empty_tx_reqs[i] = i;
 
-       if (tx_conf != NULL) {
-               txq->offloads =
-                       tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+       txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+       /* Check if caller provided the Tx cleanup threshold value. */
+       if (tx_conf->tx_free_thresh != 0) {
+               txq->tx_free_thresh = tx_conf->tx_free_thresh;
+       } else {
+               dyn_thresh = txq->ring_size -
+                       txq->ring_size / ENA_REFILL_THRESH_DIVIDER;
+               txq->tx_free_thresh = RTE_MAX(dyn_thresh,
+                       txq->ring_size - ENA_REFILL_THRESH_PACKET);
        }
+
+       txq->missing_tx_completion_threshold =
+               RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
+
        /* Store pointer to this queue in upper layer */
        txq->configured = 1;
        dev->data->tx_queues[queue_idx] = txq;
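
When the application leaves tx_free_thresh at 0, the default keeps most of
the ring counted as "free enough". A worked example for a 1024-entry ring,
where ENA_REFILL_THRESH_DIVIDER = 8 and ENA_REFILL_THRESH_PACKET = 256 are
assumed values (they live in ena_ethdev.h, not in this diff):

    #include <stdint.h>
    #include <stdio.h>

    #define ENA_REFILL_THRESH_DIVIDER 8   /* assumed */
    #define ENA_REFILL_THRESH_PACKET  256 /* assumed */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        uint16_t ring_size = 1024;
        uint16_t dyn_thresh = ring_size - ring_size / ENA_REFILL_THRESH_DIVIDER;
        uint16_t tx_free_thresh = MAX(dyn_thresh,
                (uint16_t)(ring_size - ENA_REFILL_THRESH_PACKET));

        /* max(1024 - 128, 1024 - 256) = 896 free descriptors. With the
         * eth_ena_xmit_pkts() change near the end of this diff, cleanup
         * now runs only once fewer than 896 descriptors are free, i.e.
         * after roughly 128 descriptors are in flight.
         */
        printf("tx_free_thresh = %u\n", tx_free_thresh);
        return 0;
    }
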
@@ -1211,6 +1236,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
        struct ena_ring *rxq = NULL;
        size_t buffer_size;
        int i;
+       uint16_t dyn_thresh;
 
        rxq = &adapter->rx_ring[queue_idx];
        if (rxq->configured) {
@@ -1251,19 +1277,20 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->numa_socket_id = socket_id;
        rxq->mb_pool = mp;
 
-       rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
+       rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
                sizeof(struct ena_rx_buffer) * nb_desc,
-               RTE_CACHE_LINE_SIZE);
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!rxq->rx_buffer_info) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for Rx buffer info\n");
                return -ENOMEM;
        }
 
-       rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
-                                           sizeof(struct rte_mbuf *) * nb_desc,
-                                           RTE_CACHE_LINE_SIZE);
-
+       rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
+               sizeof(struct rte_mbuf *) * nb_desc,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!rxq->rx_refill_buffer) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for Rx refill buffer\n");
@@ -1272,9 +1299,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
-       rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
-                                        sizeof(uint16_t) * nb_desc,
-                                        RTE_CACHE_LINE_SIZE);
+       rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
+               sizeof(uint16_t) * nb_desc,
+               RTE_CACHE_LINE_SIZE,
+               socket_id);
        if (!rxq->empty_rx_reqs) {
                PMD_DRV_LOG(ERR,
                        "Failed to allocate memory for empty Rx requests\n");
@@ -1290,6 +1318,14 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 
        rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
+       if (rx_conf->rx_free_thresh != 0) {
+               rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+       } else {
+               dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER;
+               rxq->rx_free_thresh = RTE_MIN(dyn_thresh,
+                       (uint16_t)(ENA_REFILL_THRESH_PACKET));
+       }
+
        /* Store pointer to this queue in upper layer */
        rxq->configured = 1;
        dev->data->rx_queues[queue_idx] = rxq;
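
The Rx default is the mirror image: the threshold that eth_ena_recv_pkts()
used to recompute on every burst is now fixed at queue setup time. Same
assumptions about the ENA_REFILL_* constants as in the Tx example above:

    #include <stdint.h>
    #include <stdio.h>

    #define ENA_REFILL_THRESH_DIVIDER 8   /* assumed */
    #define ENA_REFILL_THRESH_PACKET  256 /* assumed */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint16_t ring_size = 1024;
        uint16_t rx_free_thresh = MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
                (uint16_t)ENA_REFILL_THRESH_PACKET);

        /* min(128, 256) = 128: the Rx burst refills the ring once at least
         * 128 descriptors are free, matching the new
         * "free_queue_entries >= rx_ring->rx_free_thresh" check.
         */
        printf("rx_free_thresh = %u\n", rx_free_thresh);
        return 0;
    }
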
@@ -1508,6 +1544,87 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
        }
 }
 
+static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
+                                           struct ena_ring *tx_ring)
+{
+       struct ena_tx_buffer *tx_buf;
+       uint64_t timestamp;
+       uint64_t completion_delay;
+       uint32_t missed_tx = 0;
+       unsigned int i;
+       int rc = 0;
+
+       for (i = 0; i < tx_ring->ring_size; ++i) {
+               tx_buf = &tx_ring->tx_buffer_info[i];
+               timestamp = tx_buf->timestamp;
+
+               if (timestamp == 0)
+                       continue;
+
+               completion_delay = rte_get_timer_cycles() - timestamp;
+               if (completion_delay > adapter->missing_tx_completion_to) {
+                       if (unlikely(!tx_buf->print_once)) {
+                               PMD_TX_LOG(WARNING,
+                                       "Found a Tx that wasn't completed on time, qid %d, index %d. "
+                                       "Missing Tx outstanding for %" PRIu64 " msecs.\n",
+                                       tx_ring->id, i, completion_delay /
+                                       rte_get_timer_hz() * 1000);
+                               tx_buf->print_once = true;
+                       }
+                       ++missed_tx;
+               }
+       }
+
+       if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
+               PMD_DRV_LOG(ERR,
+                       "The number of lost Tx completions is above the threshold (%d > %d). "
+                       "Trigger the device reset.\n",
+                       missed_tx,
+                       tx_ring->missing_tx_completion_threshold);
+               adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
+               adapter->trigger_reset = true;
+               rc = -EIO;
+       }
+
+       tx_ring->tx_stats.missed_tx += missed_tx;
+
+       return rc;
+}
+
+static void check_for_tx_completions(struct ena_adapter *adapter)
+{
+       struct ena_ring *tx_ring;
+       uint64_t tx_cleanup_delay;
+       size_t qid;
+       int budget;
+       uint16_t nb_tx_queues;
+
+       if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+               return;
+
+       nb_tx_queues = adapter->edev_data->nb_tx_queues;
+       budget = adapter->missing_tx_completion_budget;
+
+       qid = adapter->last_tx_comp_qid;
+       while (budget-- > 0) {
+               tx_ring = &adapter->tx_ring[qid];
+
+               /* Tx cleanup is called only from the Tx burst function, so how
+                * often it runs depends on the application, and each run is
+                * limited by the cleanup threshold. To avoid falsely reporting
+                * a missing HW Tx completion, check how long ago the cleanup
+                * function was last called for this queue.
+                */
+               tx_cleanup_delay = rte_get_timer_cycles() -
+                       tx_ring->last_cleanup_ticks;
+               if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
+                       check_for_tx_completion_in_queue(adapter, tx_ring);
+               qid = (qid + 1) % nb_tx_queues;
+       }
+
+       adapter->last_tx_comp_qid = qid;
+}
+
 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
                                  void *arg)
 {
@@ -1516,6 +1633,7 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 
        check_for_missing_keep_alive(adapter);
        check_for_admin_com_state(adapter);
+       check_for_tx_completions(adapter);
 
        if (unlikely(adapter->trigger_reset)) {
                PMD_DRV_LOG(ERR, "Trigger reset is on\n");
@@ -1624,6 +1742,50 @@ static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
        return max_num_io_queues;
 }
 
+static void
+ena_set_offloads(struct ena_offloads *offloads,
+                struct ena_admin_feature_offload_desc *offload_desc)
+{
+       if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+               offloads->tx_offloads |= ENA_IPV4_TSO;
+
+       /* Tx IPv4 checksum offloads */
+       if (offload_desc->tx &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
+               offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
+       if (offload_desc->tx &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
+               offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
+       if (offload_desc->tx &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+               offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;
+
+       /* Tx IPv6 checksum offloads */
+       if (offload_desc->tx &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
+               offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
+       if (offload_desc->tx &
+            ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
+               offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;
+
+       /* Rx IPv4 checksum offloads */
+       if (offload_desc->rx_supported &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
+               offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
+       if (offload_desc->rx_supported &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+               offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
+
+       /* Rx IPv6 checksum offloads */
+       if (offload_desc->rx_supported &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+               offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
+
+       if (offload_desc->rx_supported &
+           ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
+               offloads->rx_offloads |= ENA_RX_RSS_HASH;
+}
+
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
@@ -1745,17 +1907,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        /* Set max MTU for this device */
        adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
 
-       /* set device support for offloads */
-       adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
-               ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
-       adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
-               ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
-       adapter->offloads.rx_csum_supported =
-               (get_feat_ctx.offload.rx_supported &
-               ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
-       adapter->offloads.rss_hash_supported =
-               (get_feat_ctx.offload.rx_supported &
-               ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) != 0;
+       ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
 
        /* Copy MAC address and point DPDK to it */
        eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
@@ -1856,8 +2008,28 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
        dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
 
+       /* Scattered Rx cannot be turned off in the HW, so this capability must
+        * be forced.
+        */
+       dev->data->scattered_rx = 1;
+
+       adapter->last_tx_comp_qid = 0;
+
+       adapter->missing_tx_completion_budget =
+               RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
+
+       adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
+       /* To avoid spurious Tx completion timeouts when the application does
+        * not call the Tx cleanup function often enough, only check queues
+        * whose last cleanup ran within half of the missing completion
+        * timeout, which leaves a safety margin. If Tx completions really go
+        * missing, they will be detected sooner or later anyway.
+        */
+       adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
+
        adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
        adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+
        return 0;
 }
 
@@ -1890,12 +2062,65 @@ static void ena_init_rings(struct ena_adapter *adapter,
        }
 }
 
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
+{
+       uint64_t port_offloads = 0;
+
+       if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
+               port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+       if (adapter->offloads.rx_offloads &
+           (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
+               port_offloads |=
+                       DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+
+       if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
+               port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+       port_offloads |= DEV_RX_OFFLOAD_SCATTER;
+
+       return port_offloads;
+}
+
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
+{
+       uint64_t port_offloads = 0;
+
+       if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
+               port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+
+       if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
+               port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+       if (adapter->offloads.tx_offloads &
+           (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
+            ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
+               port_offloads |=
+                       DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+
+       port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+       return port_offloads;
+}
+
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
+{
+       RTE_SET_USED(adapter);
+
+       return 0;
+}
+
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
+{
+       RTE_SET_USED(adapter);
+
+       return 0;
+}
+
 static int ena_infos_get(struct rte_eth_dev *dev,
                          struct rte_eth_dev_info *dev_info)
 {
        struct ena_adapter *adapter;
        struct ena_com_dev *ena_dev;
-       uint64_t rx_feat = 0, tx_feat = 0;
 
        ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
        ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
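
These helpers replace the scattered rx_feat/tx_feat logic in ena_infos_get()
(next hunk), so the capabilities an application sees now come from one
place. A consumer-side sketch using only standard ethdev calls, with the
pre-21.11 DEV_*_OFFLOAD_* flag names this diff already uses:

    #include <rte_ethdev.h>

    static int configure_with_supported_offloads(uint16_t port_id)
    {
        struct rte_eth_conf conf = { 0 };
        struct rte_eth_dev_info dev_info;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
            return ret;

        /* Request only what ena_get_*_offloads() reported as supported. */
        if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)
            conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
        if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
            conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

        /* One Rx and one Tx queue, purely for illustration. */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
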
@@ -1914,45 +2139,26 @@ static int ena_infos_get(struct rte_eth_dev *dev,
                        ETH_LINK_SPEED_50G  |
                        ETH_LINK_SPEED_100G;
 
-       /* Set Tx & Rx features available for device */
-       if (adapter->offloads.tso4_supported)
-               tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
-
-       if (adapter->offloads.tx_csum_supported)
-               tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-                       DEV_TX_OFFLOAD_UDP_CKSUM |
-                       DEV_TX_OFFLOAD_TCP_CKSUM;
-
-       if (adapter->offloads.rx_csum_supported)
-               rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-                       DEV_RX_OFFLOAD_UDP_CKSUM  |
-                       DEV_RX_OFFLOAD_TCP_CKSUM;
-
-       rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
        /* Inform framework about available features */
-       dev_info->rx_offload_capa = rx_feat;
-       if (adapter->offloads.rss_hash_supported)
-               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
-       dev_info->rx_queue_offload_capa = rx_feat;
-       dev_info->tx_offload_capa = tx_feat;
-       dev_info->tx_queue_offload_capa = tx_feat;
+       dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
+       dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
+       dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
+       dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
 
        dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
        dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
 
        dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
-       dev_info->max_rx_pktlen  = adapter->max_mtu;
+       dev_info->max_rx_pktlen  = adapter->max_mtu + RTE_ETHER_HDR_LEN +
+               RTE_ETHER_CRC_LEN;
+       dev_info->min_mtu = ENA_MIN_MTU;
+       dev_info->max_mtu = adapter->max_mtu;
        dev_info->max_mac_addrs = 1;
 
        dev_info->max_rx_queues = adapter->max_num_io_queues;
        dev_info->max_tx_queues = adapter->max_num_io_queues;
        dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
-       adapter->tx_supported_offloads = tx_feat;
-       adapter->rx_supported_offloads = rx_feat;
-
        dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
        dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
        dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
@@ -2080,7 +2286,6 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 {
        struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
        unsigned int free_queue_entries;
-       unsigned int refill_threshold;
        uint16_t next_to_clean = rx_ring->next_to_clean;
        uint16_t descs_in_use;
        struct rte_mbuf *mbuf;
@@ -2162,12 +2367,9 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        rx_ring->next_to_clean = next_to_clean;
 
        free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
-       refill_threshold =
-               RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
-               (unsigned int)ENA_REFILL_THRESH_PACKET);
 
        /* Burst refill to save doorbells, memory barriers, const interval */
-       if (free_queue_entries > refill_threshold) {
+       if (free_queue_entries >= rx_ring->rx_free_thresh) {
                ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
                ena_populate_rx_queue(rx_ring, free_queue_entries);
        }
@@ -2183,45 +2385,60 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint32_t i;
        struct rte_mbuf *m;
        struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+       struct ena_adapter *adapter = tx_ring->adapter;
        struct rte_ipv4_hdr *ip_hdr;
        uint64_t ol_flags;
+       uint64_t l4_csum_flag;
+       uint64_t dev_offload_capa;
        uint16_t frag_field;
+       bool need_pseudo_csum;
 
+       dev_offload_capa = adapter->offloads.tx_offloads;
        for (i = 0; i != nb_pkts; i++) {
                m = tx_pkts[i];
                ol_flags = m->ol_flags;
 
-               if (!(ol_flags & PKT_TX_IPV4))
+               /* Check if any offload flag was set */
+               if (ol_flags == 0)
                        continue;
 
-               /* If there was not L2 header length specified, assume it is
-                * length of the ethernet header.
-                */
-               if (unlikely(m->l2_len == 0))
-                       m->l2_len = sizeof(struct rte_ether_hdr);
-
-               ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-                                                m->l2_len);
-               frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
-
-               if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
-                       m->packet_type |= RTE_PTYPE_L4_NONFRAG;
-
-                       /* If IPv4 header has DF flag enabled and TSO support is
-                        * disabled, partial chcecksum should not be calculated.
-                        */
-                       if (!tx_ring->adapter->offloads.tso4_supported)
-                               continue;
-               }
-
-               if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
-                               (ol_flags & PKT_TX_L4_MASK) ==
-                               PKT_TX_SCTP_CKSUM) {
+               l4_csum_flag = ol_flags & PKT_TX_L4_MASK;
+               /* SCTP checksum offload is not supported by the ENA. */
+               if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
+                   l4_csum_flag == PKT_TX_SCTP_CKSUM) {
+                       PMD_TX_LOG(DEBUG,
+                               "mbuf[%" PRIu32 "] has unsupported offload flags set: 0x%" PRIx64 "\n",
+                               i, ol_flags);
                        rte_errno = ENOTSUP;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               /* Check if requested offload is also enabled for the queue */
+               if ((ol_flags & PKT_TX_IP_CKSUM &&
+                    !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+                   (l4_csum_flag == PKT_TX_TCP_CKSUM &&
+                    !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+                   (l4_csum_flag == PKT_TX_UDP_CKSUM &&
+                    !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+                       PMD_TX_LOG(DEBUG,
+                               "mbuf[%" PRIu32 "]: requested offloads 0x%" PRIx64 " are not enabled for the queue[%u]\n",
+                               i, ol_flags, tx_ring->id);
+                       rte_errno = EINVAL;
+                       return i;
+               }
+
+               /* The caller is obligated to set l2 and l3 len if any cksum
+                * offload is enabled.
+                */
+               if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) &&
+                   (m->l2_len == 0 || m->l3_len == 0))) {
+                       PMD_TX_LOG(DEBUG,
+                               "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
+                               i);
+                       rte_errno = EINVAL;
+                       return i;
+               }
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
                        rte_errno = -ret;
@@ -2229,16 +2446,76 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                }
 #endif
 
-               /* In case we are supposed to TSO and have DF not set (DF=0)
-                * hardware must be provided with partial checksum, otherwise
-                * it will take care of necessary calculations.
+               /* Verify HW support for requested offloads and determine if
+                * pseudo header checksum is needed.
                 */
+               need_pseudo_csum = false;
+               if (ol_flags & PKT_TX_IPV4) {
+                       if (ol_flags & PKT_TX_IP_CKSUM &&
+                           !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
+                               rte_errno = ENOTSUP;
+                               return i;
+                       }
 
-               ret = rte_net_intel_cksum_flags_prepare(m,
-                       ol_flags & ~PKT_TX_TCP_SEG);
-               if (ret != 0) {
-                       rte_errno = -ret;
-                       return i;
+                       if (ol_flags & PKT_TX_TCP_SEG &&
+                           !(dev_offload_capa & ENA_IPV4_TSO)) {
+                               rte_errno = ENOTSUP;
+                               return i;
+                       }
+
+                       /* Check HW capabilities and if pseudo csum is needed
+                        * for L4 offloads.
+                        */
+                       if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+                           !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
+                               if (dev_offload_capa &
+                                   ENA_L4_IPV4_CSUM_PARTIAL) {
+                                       need_pseudo_csum = true;
+                               } else {
+                                       rte_errno = ENOTSUP;
+                                       return i;
+                               }
+                       }
+
+                       /* Parse the DF flag */
+                       ip_hdr = rte_pktmbuf_mtod_offset(m,
+                               struct rte_ipv4_hdr *, m->l2_len);
+                       frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+                       if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
+                               m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+                       } else if (ol_flags & PKT_TX_TCP_SEG) {
+                               /* In case we are supposed to TSO and have DF
+                                * not set (DF=0) hardware must be provided with
+                                * partial checksum.
+                                */
+                               need_pseudo_csum = true;
+                       }
+               } else if (ol_flags & PKT_TX_IPV6) {
+                       /* There is no support for IPv6 TSO as for now. */
+                       if (ol_flags & PKT_TX_TCP_SEG) {
+                               rte_errno = ENOTSUP;
+                               return i;
+                       }
+
+                       /* Check HW capabilities and if pseudo csum is needed */
+                       if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+                           !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
+                               if (dev_offload_capa &
+                                   ENA_L4_IPV6_CSUM_PARTIAL) {
+                                       need_pseudo_csum = true;
+                               } else {
+                                       rte_errno = ENOTSUP;
+                                       return i;
+                               }
+                       }
+               }
+
+               if (need_pseudo_csum) {
+                       ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
+                       if (ret != 0) {
+                               rte_errno = -ret;
+                               return i;
+                       }
                }
        }
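
The reworked eth_ena_prep_pkts() pushes more responsibility onto the caller:
checksum offloads must come with valid l2_len/l3_len, and the PMD decides
per packet whether a pseudo-header checksum has to be inserted. A hedged
caller-side sketch with the pre-21.11 PKT_TX_* flag names used throughout
this diff:

    #include <rte_ethdev.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>

    static uint16_t send_one_tcp_ipv4(uint16_t port_id, uint16_t queue_id,
                                      struct rte_mbuf *m)
    {
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);

        /* tx_prepare ends up in eth_ena_prep_pkts(); on ENA it may also
         * fill in the pseudo-header checksum when only partial L4 csum is
         * supported by the device.
         */
        if (rte_eth_tx_prepare(port_id, queue_id, &m, 1) != 1)
            return 0; /* rte_errno tells which offload was rejected */

        return rte_eth_tx_burst(port_id, queue_id, &m, 1);
    }
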
 
@@ -2257,6 +2534,20 @@ static void ena_update_hints(struct ena_adapter *adapter,
                adapter->ena_dev.mmio_read.reg_read_to =
                        hints->mmio_read_timeout * 1000;
 
+       if (hints->missing_tx_completion_timeout) {
+               if (hints->missing_tx_completion_timeout ==
+                   ENA_HW_HINTS_NO_TIMEOUT) {
+                       adapter->missing_tx_completion_to =
+                               ENA_HW_HINTS_NO_TIMEOUT;
+               } else {
+                       /* Convert from msecs to ticks */
+                       adapter->missing_tx_completion_to = rte_get_timer_hz() *
+                               hints->missing_tx_completion_timeout / 1000;
+                       adapter->tx_cleanup_stall_delay =
+                               adapter->missing_tx_completion_to / 2;
+               }
+       }
+
        if (hints->driver_watchdog_timeout) {
                if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
                        adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
@@ -2447,6 +2738,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
        }
 
        tx_info->tx_descs = nb_hw_desc;
+       tx_info->timestamp = rte_get_timer_cycles();
 
        tx_ring->tx_stats.cnt++;
        tx_ring->tx_stats.bytes += mbuf->pkt_len;
@@ -2459,12 +2751,12 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
 
 static void ena_tx_cleanup(struct ena_ring *tx_ring)
 {
-       unsigned int cleanup_budget;
        unsigned int total_tx_descs = 0;
+       uint16_t cleanup_budget;
        uint16_t next_to_clean = tx_ring->next_to_clean;
 
-       cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
-               (unsigned int)ENA_REFILL_THRESH_PACKET);
+       /* Attempt to release all Tx descriptors (ring_size - 1 -> size_mask) */
+       cleanup_budget = tx_ring->size_mask;
 
        while (likely(total_tx_descs < cleanup_budget)) {
                struct rte_mbuf *mbuf;
@@ -2479,6 +2771,7 @@ static void ena_tx_cleanup(struct ena_ring *tx_ring)
 
                /* Get Tx info & store how many descs were processed  */
                tx_info = &tx_ring->tx_buffer_info[req_id];
+               tx_info->timestamp = 0;
 
                mbuf = tx_info->mbuf;
                rte_pktmbuf_free(mbuf);
@@ -2499,12 +2792,16 @@ static void ena_tx_cleanup(struct ena_ring *tx_ring)
                ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
                ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
        }
+
+       /* Notify completion handler that the cleanup was just called */
+       tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
 }
 
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts)
 {
        struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+       int available_desc;
        uint16_t sent_idx = 0;
 
 #ifdef RTE_ETHDEV_DEBUG_TX
@@ -2524,8 +2821,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tx_ring->size_mask)]);
        }
 
-       tx_ring->tx_stats.available_desc =
-               ena_com_free_q_entries(tx_ring->ena_com_io_sq);
+       available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
+       tx_ring->tx_stats.available_desc = available_desc;
 
        /* If there are ready packets to be xmitted... */
        if (likely(tx_ring->pkts_without_db)) {
@@ -2535,7 +2832,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                tx_ring->pkts_without_db = false;
        }
 
-       ena_tx_cleanup(tx_ring);
+       if (available_desc < tx_ring->tx_free_thresh)
+               ena_tx_cleanup(tx_ring);
 
        tx_ring->tx_stats.available_desc =
                ena_com_free_q_entries(tx_ring->ena_com_io_sq);