drivers/net: redefine array size macros
[dpdk.git] / drivers / net / ena / ena_ethdev.c
index b1c215b..9aa51c9 100644
@@ -5,8 +5,8 @@
 
 #include <rte_string_fns.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_tcp.h>
 #include <rte_atomic.h>
 #include <rte_dev.h>
@@ -27,8 +27,8 @@
 #include <ena_eth_io_defs.h>
 
 #define DRV_MODULE_VER_MAJOR   2
-#define DRV_MODULE_VER_MINOR   1
-#define DRV_MODULE_VER_SUBMINOR        0
+#define DRV_MODULE_VER_MINOR   2
+#define DRV_MODULE_VER_SUBMINOR        1
 
 #define ENA_IO_TXQ_IDX(q)      (2 * (q))
 #define ENA_IO_RXQ_IDX(q)      (2 * (q) + 1)
@@ -47,7 +47,7 @@
 #define ENA_HASH_KEY_SIZE      40
 #define ETH_GSTRING_LEN        32
 
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) RTE_DIM(x)
 
 #define ENA_MIN_RING_DESC      128
 
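RTE_DIM() comes from rte_common.h and expands to the same sizeof-based expression, so the local ARRAY_SIZE wrapper keeps identical semantics for statically sized arrays. A minimal usage sketch (the array and variable names are illustrative, not driver fields):

    #include <stdint.h>
    #include <rte_common.h>   /* RTE_DIM() */

    /* RTE_DIM(a) evaluates to sizeof(a) / sizeof((a)[0]) at compile time;
     * it is only valid for real arrays, never for pointers. */
    static const uint16_t example_ring_sizes[] = { 128, 256, 512, 1024 };
    static const unsigned int example_nb_sizes = RTE_DIM(example_ring_sizes);
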
@@ -380,20 +380,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
        }
 }
 
-static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
-{
-       if (likely(req_id < rx_ring->ring_size))
-               return 0;
-
-       PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id);
-
-       rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
-       rx_ring->adapter->trigger_reset = true;
-       ++rx_ring->rx_stats.bad_req_id;
-
-       return -EFAULT;
-}
-
 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
        struct ena_tx_buffer *tx_info = NULL;
@@ -1246,6 +1232,10 @@ static int ena_queue_start(struct ena_ring *ring)
                PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
                return ENA_COM_FAULT;
        }
+       /* Flush the per-core caches of the RX buffer pool, as the buffers
+        * can be used on other cores as well.
+        */
+       rte_mempool_cache_flush(NULL, ring->mb_pool);
 
        return 0;
 }
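rte_mempool_cache_flush() with a NULL cache argument drains the calling lcore's default per-lcore cache back into the common pool, which is what makes the cached buffers visible to other cores again. A minimal sketch of the call used above (the wrapper name is hypothetical):

    #include <rte_mempool.h>

    /* Return mbufs held in this lcore's local cache to the shared pool so
     * that RX refills or frees performed by other cores can obtain them. */
    static inline void example_flush_rx_pool_cache(struct rte_mempool *mp)
    {
        rte_mempool_cache_flush(NULL, mp);
    }
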
@@ -1292,6 +1282,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
        txq->ring_size = nb_desc;
        txq->size_mask = nb_desc - 1;
        txq->numa_socket_id = socket_id;
+       txq->pkts_without_db = false;
 
        txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
                                          sizeof(struct ena_tx_buffer) *
@@ -1482,10 +1473,6 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
                        rte_prefetch0(mbufs[i + 4]);
 
                req_id = rxq->empty_rx_reqs[next_to_use];
-               rc = validate_rx_req_id(rxq, req_id);
-               if (unlikely(rc))
-                       break;
-
                rx_info = &rxq->rx_buffer_info[req_id];
 
                rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
@@ -2110,8 +2097,6 @@ static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
 
        len = ena_bufs[buf].len;
        req_id = ena_bufs[buf].req_id;
-       if (unlikely(validate_rx_req_id(rx_ring, req_id)))
-               return NULL;
 
        rx_info = &rx_ring->rx_buffer_info[req_id];
 
@@ -2135,10 +2120,6 @@ static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
                ++buf;
                len = ena_bufs[buf].len;
                req_id = ena_bufs[buf].req_id;
-               if (unlikely(validate_rx_req_id(rx_ring, req_id))) {
-                       rte_mbuf_raw_free(mbuf_head);
-                       return NULL;
-               }
 
                rx_info = &rx_ring->rx_buffer_info[req_id];
                RTE_ASSERT(rx_info->mbuf != NULL);
@@ -2226,10 +2207,16 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                    &ena_rx_ctx);
                if (unlikely(rc)) {
                        PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
-                       rx_ring->adapter->reset_reason =
-                               ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+                       if (rc == ENA_COM_NO_SPACE) {
+                               ++rx_ring->rx_stats.bad_desc_num;
+                               rx_ring->adapter->reset_reason =
+                                       ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+                       } else {
+                               ++rx_ring->rx_stats.bad_req_id;
+                               rx_ring->adapter->reset_reason =
+                                       ENA_REGS_RESET_INV_RX_REQ_ID;
+                       }
                        rx_ring->adapter->trigger_reset = true;
-                       ++rx_ring->rx_stats.bad_desc_num;
                        return 0;
                }
 
@@ -2373,8 +2360,8 @@ static void ena_update_hints(struct ena_adapter *adapter,
        }
 }
 
-static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
-                                       struct rte_mbuf *mbuf)
+static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
+                                             struct rte_mbuf *mbuf)
 {
        struct ena_com_dev *ena_dev;
        int num_segments, header_len, rc;
@@ -2384,13 +2371,21 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
        header_len = mbuf->data_len;
 
        if (likely(num_segments < tx_ring->sgl_size))
-               return 0;
+               goto checkspace;
 
        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
            (num_segments == tx_ring->sgl_size) &&
            (header_len < tx_ring->tx_max_header_size))
-               return 0;
+               goto checkspace;
 
+       /* Check for space for 2 additional descriptors: the metadata
+        * descriptor and one for a possible header split. Linearization
+        * will be needed, so the segment count drops from num_segments to 1.
+        */
+       if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
+               PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
+               return ENA_COM_NO_MEM;
+       }
        ++tx_ring->tx_stats.linearize;
        rc = rte_pktmbuf_linearize(mbuf);
        if (unlikely(rc)) {
@@ -2400,7 +2395,19 @@ static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
                return rc;
        }
 
-       return rc;
+       return 0;
+
+checkspace:
+       /* Check for space for 2 additional descriptors: the metadata
+        * descriptor and one for a possible header split.
+        */
+       if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                                         num_segments + 2)) {
+               PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
+               return ENA_COM_NO_MEM;
+       }
+
+       return 0;
 }
 
 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
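Both space checks above budget one descriptor per data segment plus two extra descriptors (the metadata descriptor and one for a possible header split), which is why the linearized path asks for 3 and the non-linearized path for num_segments + 2. A worked sketch of that arithmetic (the helper is illustrative, not a driver function):

    #include <stdbool.h>

    /* Descriptor budget checked before queuing one mbuf:
     *   linearized mbuf:  1 data segment  + 2 extra = 3
     *   N-segment mbuf:   N data segments + 2 extra = N + 2
     */
    static inline int example_required_tx_descs(int num_segments, bool will_linearize)
    {
        return (will_linearize ? 1 : num_segments) + 2;
    }
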
@@ -2487,7 +2494,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
        int nb_hw_desc;
        int rc;
 
-       rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+       rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf);
        if (unlikely(rc))
                return rc;
 
@@ -2515,6 +2522,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
                        "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
                        tx_ring->id);
                ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+               tx_ring->tx_stats.doorbells++;
+               tx_ring->pkts_without_db = false;
        }
 
        /* prepare the packet's descriptors to dma engine */
@@ -2593,13 +2602,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                return 0;
        }
 
-       nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq),
-               nb_pkts);
-
        for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
                if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
                        break;
-
+               tx_ring->pkts_without_db = true;
                rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
                        tx_ring->size_mask)]);
        }
@@ -2608,10 +2614,11 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                ena_com_free_q_entries(tx_ring->ena_com_io_sq);
 
        /* If there are ready packets to be xmitted... */
-       if (sent_idx > 0) {
+       if (likely(tx_ring->pkts_without_db)) {
                /* ...let HW do its best :-) */
                ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
                tx_ring->tx_stats.doorbells++;
+               tx_ring->pkts_without_db = false;
        }
 
        ena_tx_cleanup(tx_ring);
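
Taken together, the Tx hunks batch doorbell writes around the new pkts_without_db flag: it is set after every successfully queued mbuf and cleared whenever a doorbell is written, either inside ena_xmit_mbuf() when the LLQ max burst size is reached or once at the end of the burst. A condensed restatement of the logic from the hunks above (no new code):

    for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
        /* ena_xmit_mbuf() may itself write the doorbell when the LLQ max
         * burst size is reached, clearing pkts_without_db in the process. */
        if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
            break;
        tx_ring->pkts_without_db = true;
    }

    /* Testing the flag instead of sent_idx > 0 avoids ringing a second,
     * redundant doorbell when the last write already happened mid-burst. */
    if (likely(tx_ring->pkts_without_db)) {
        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
        tx_ring->tx_stats.doorbells++;
        tx_ring->pkts_without_db = false;
    }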