diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 37a88cb..0fbb260 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -12,7 +12,7 @@
 
 #define ARK_RX_META_SIZE 32
 #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
-#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+#define ARK_RX_MPU_CHUNK (64U)
 
 /* Forward declarations */
 struct ark_rx_queue;
@@ -41,6 +41,9 @@ struct ark_rx_queue {
        rx_user_meta_hook_fn rx_user_meta_hook;
        void *ext_user_data;
 
+       uint32_t dataroom;
+       uint32_t headroom;
+
        uint32_t queue_size;
        uint32_t queue_mask;
 
@@ -102,7 +105,7 @@ static inline void
 eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
 {
        queue->cons_index = cons_index;
-       if ((cons_index + queue->queue_size - queue->seed_index) >= 64U) {
+       if ((cons_index + queue->queue_size - queue->seed_index) >= ARK_RX_MPU_CHUNK) {
                eth_ark_rx_seed_mbufs(queue);
                ark_mpu_set_producer(queue->mpu, queue->seed_index);
        }
@@ -164,6 +167,9 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        /* NOTE zmalloc is used, no need to 0 indexes, etc. */
        queue->mb_pool = mb_pool;
+       queue->dataroom = rte_pktmbuf_data_room_size(mb_pool) -
+               RTE_PKTMBUF_HEADROOM;
+       queue->headroom = RTE_PKTMBUF_HEADROOM;
        queue->phys_qid = qidx;
        queue->queue_index = queue_idx;
        queue->queue_size = nb_desc;
@@ -174,12 +180,12 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
        queue->reserve_q =
                rte_zmalloc_socket("Ark_rx_queue mbuf",
                                   nb_desc * sizeof(struct rte_mbuf *),
-                                  64,
+                                  512,
                                   socket_id);
        queue->paddress_q =
                rte_zmalloc_socket("Ark_rx_queue paddr",
                                   nb_desc * sizeof(rte_iova_t),
-                                  64,
+                                  512,
                                   socket_id);
 
        if (queue->reserve_q == 0 || queue->paddress_q == 0) {
@@ -196,6 +202,15 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
        queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
        queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
 
+       /* Configure UDM per queue */
+       ark_udm_stop(queue->udm, 0);
+       ark_udm_configure(queue->udm,
+                         RTE_PKTMBUF_HEADROOM,
+                         queue->dataroom,
+                         ARK_RX_WRITE_TIME_NS);
+       ark_udm_stats_reset(queue->udm);
+       ark_udm_stop(queue->udm, 0);
+
        /* populate mbuf reserve */
        status = eth_ark_rx_seed_mbufs(queue);
 
@@ -267,6 +282,7 @@ eth_ark_recv_pkts(void *rx_queue,
                mbuf->data_len = meta->pkt_len;
 
                if (ARK_DEBUG_CORE) {   /* debug sanity checks */
+
                        if ((meta->pkt_len > (1024 * 16)) ||
                            (meta->pkt_len == 0)) {
                                ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
@@ -295,7 +311,7 @@ eth_ark_recv_pkts(void *rx_queue,
                        }
                }
 
-               if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+               if (unlikely(meta->pkt_len > queue->dataroom))
                        cons_index = eth_ark_rx_jumbo
                                (queue, meta, mbuf, cons_index + 1);
                else
@@ -336,14 +352,14 @@ eth_ark_rx_jumbo(struct ark_rx_queue *queue,
        /* first buf populated by called */
        mbuf_prev = mbuf0;
        segments = 1;
-       data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+       data_len = RTE_MIN(meta->pkt_len, queue->dataroom);
        remaining = meta->pkt_len - data_len;
        mbuf0->data_len = data_len;
 
        /* HW guarantees that the data does not exceed prod_index! */
        while (remaining != 0) {
                data_len = RTE_MIN(remaining,
-                                  RTE_MBUF_DEFAULT_DATAROOM);
+                                  queue->dataroom);
 
                remaining -= data_len;
                segments += 1;
@@ -431,7 +447,8 @@ eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
 static inline int
 eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
 {
-       uint32_t limit = queue->cons_index + queue->queue_size;
+       uint32_t limit = (queue->cons_index & ~(ARK_RX_MPU_CHUNK - 1)) +
+               queue->queue_size;
        uint32_t seed_index = queue->seed_index;
 
        uint32_t count = 0;
@@ -594,14 +611,14 @@ eth_ark_udm_force_close(struct rte_eth_dev *dev)
 
                        ark_mpu_start(queue->mpu);
                        /* Add some buffers */
-                       index = 100000 + queue->seed_index;
+                       index = ARK_RX_MPU_CHUNK + queue->seed_index;
                        ark_mpu_set_producer(queue->mpu, index);
                }
                /* Wait to allow data to pass */
                usleep(100);
 
-               ARK_PMD_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
-                               ark_udm_is_flushed(ark->udm.v));
+               ARK_PMD_LOG(NOTICE, "UDM forced flush attempt, stopped = %d\n",
+                           ark_udm_is_flushed(ark->udm.v));
        }
        ark_udm_reset(ark->udm.v);
 }
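
The reworked seeding path replenishes the ring only in whole MPU chunks: the limit is computed from the consumer index rounded down to a 64-entry boundary, so the producer index handed to hardware always advances in multiples of ARK_RX_MPU_CHUNK. A minimal standalone sketch of that arithmetic, using hypothetical queue values rather than the driver's own state:

#include <stdint.h>
#include <stdio.h>

#define ARK_RX_MPU_CHUNK 64U

int main(void)
{
	uint32_t queue_size = 1024;  /* power-of-two ring size (assumed) */
	uint32_t cons_index = 70;    /* 70 descriptors consumed so far */
	uint32_t seed_index = 1024;  /* ring was fully seeded at start */

	/* Round the consumer index down to a chunk boundary before adding
	 * the ring size, so seeding stops exactly on a 64-entry boundary. */
	uint32_t limit = (cons_index & ~(ARK_RX_MPU_CHUNK - 1)) + queue_size;

	printf("descriptors eligible for seeding: %u\n", limit - seed_index);
	return 0;
}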
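
With the data room now taken from the mbuf pool (rte_pktmbuf_data_room_size() minus RTE_PKTMBUF_HEADROOM) instead of the fixed RTE_MBUF_DEFAULT_DATAROOM, receive chaining kicks in whenever the reported packet length exceeds the pool's usable room. A short sketch of the resulting segment count, assuming a common 2 KB mempool; the numbers are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* dataroom as the driver derives it per queue, e.g.
	 * rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM
	 * = 2176 - 128 = 2048 for a common 2 KB mempool. */
	uint32_t dataroom = 2048;
	uint32_t pkt_len = 9000;     /* jumbo frame length from RX meta */

	uint32_t data_len = pkt_len < dataroom ? pkt_len : dataroom;
	uint32_t remaining = pkt_len - data_len;
	uint32_t segments = 1;

	while (remaining != 0) {     /* mirrors the chaining loop's math */
		data_len = remaining < dataroom ? remaining : dataroom;
		remaining -= data_len;
		segments++;
	}
	printf("%u-byte packet spans %u mbuf segments\n", pkt_len, segments);
	return 0;
}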