diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index c5a8c1a..9b4d289 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -7,28 +7,18 @@
 #include "cnxk_worker.h"
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
-       (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])
+       deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
 
 #define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
-       (enq_op =                                                              \
-                enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]     \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
+       enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]
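The two macros above collapse the old seven-dimensional table walk into a single masked index. This works because each NIX_RX_OFFLOAD_*_F / NIX_TX_OFFLOAD_*_F flag is a distinct low-order bit and NIX_RX_OFFLOAD_MAX / NIX_TX_OFFLOAD_MAX sit one power of two above them, so the offload word itself is a valid array index. A minimal runnable sketch, using hypothetical two-flag stand-ins rather than the real cnxk flag set:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the single-bit NIX_RX_OFFLOAD_*_F flags. */
#define RSS_F   (1ULL << 0)
#define CSUM_F  (1ULL << 1)
#define MAX_F   (1 << 2) /* power of two above all flags: 4 combinations */

typedef void (*deq_fn_t)(void);

static void deq_plain(void)    { puts("no offloads"); }
static void deq_rss_csum(void) { puts("rss+csum"); }

int main(void)
{
	const deq_fn_t deq_ops[MAX_F] = {
		[0] = deq_plain,
		[RSS_F | CSUM_F] = deq_rss_csum,
	};
	uint64_t rx_offloads = RSS_F | CSUM_F;

	/* One masked index replaces the old seven nested !!(...) lookups. */
	deq_ops[rx_offloads & (MAX_F - 1)]();
	return 0;
}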
 
 static uint32_t
 cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
 {
-       uint32_t wdata = BIT(16) | 1;
+       uint32_t wdata = 1;
+
+       if (dev->deq_tmo_ns)
+               wdata |= BIT(16);
 
        switch (dev->gw_mode) {
        case CN10K_GW_MODE_NONE:
@@ -63,7 +53,6 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
        /* First cache line is reserved for cookie */
        ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
        ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
-       ws->tx_base = ws->base;
        ws->hws_id = port_id;
        ws->swtag_req = 0;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
@@ -102,7 +91,8 @@ cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
        ws->xaq_lmt = dev->xaq_lmt;
 
        /* Set get_work timeout for HWS */
-       val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+       val = NSEC2USEC(dev->deq_tmo_ns);
+       val = val ? val - 1 : 0;
        plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
 }
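The hunk above fixes an unsigned underflow: when no dequeue timeout is configured, deq_tmo_ns is 0 and the old unconditional subtraction programmed SSOW_LF_GWS_NW_TIM with a wrapped-around value. A small sketch of the wrap, assuming NSEC2USEC is plain integer division:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC2USEC(ns) ((ns) / 1000) /* assumed to match the driver macro */

int main(void)
{
	uint64_t deq_tmo_ns = 0; /* no dequeue timeout configured */

	/* Old code: unconditional minus one wraps 0 to UINT64_MAX. */
	uint64_t old = NSEC2USEC(deq_tmo_ns) - 1;

	/* Fixed code: clamp to zero instead of underflowing. */
	uint64_t val = NSEC2USEC(deq_tmo_ns);
	val = val ? val - 1 : 0;

	printf("old=%" PRIu64 " fixed=%" PRIu64 "\n", old, val);
	return 0;
}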
 
@@ -111,10 +101,10 @@ cn10k_sso_hws_release(void *arg, void *hws)
 {
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
-       int i;
+       uint16_t i;
 
        for (i = 0; i < dev->nb_event_queues; i++)
-               roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
+               roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
        memset(ws, 0, sizeof(*ws));
 }
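The loop fix above is more than cosmetic: casting the address of an int loop counter to uint16_t * reads only the first two bytes of the int, which happens to yield the intended queue id on little-endian targets but yields 0 on big-endian ones, and violates strict aliasing either way. Making the counter uint16_t removes the cast entirely. A sketch of the hazard:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int i = 1;
	uint16_t first_half;

	/* What (uint16_t *)&i actually read: the first two bytes of a
	 * four-byte int.  Little-endian machines get 1 by luck;
	 * big-endian machines get 0, i.e. the wrong queue id. */
	memcpy(&first_half, &i, sizeof(first_half));
	printf("first 16 bits of (int)1: %u\n", first_half);
	return 0;
}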
 
@@ -272,15 +262,13 @@ cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
                        ws_cookie,
                        sizeof(struct cnxk_sso_hws_cookie) +
                                sizeof(struct cn10k_sso_hws) +
-                               (sizeof(uint64_t) * (dev->max_port_id + 1) *
-                                RTE_MAX_QUEUES_PER_PORT),
+                               dev->tx_adptr_data_sz,
                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                if (ws_cookie == NULL)
                        return -ENOMEM;
                ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
                memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
-                      sizeof(uint64_t) * (dev->max_port_id + 1) *
-                              RTE_MAX_QUEUES_PER_PORT);
+                      dev->tx_adptr_data_sz);
                event_dev->data->ports[i] = ws;
        }
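The replaced allocation sized every workslot's copy for the absolute worst case, (max_port_id + 1) * RTE_MAX_QUEUES_PER_PORT 64-bit entries, while dev->tx_adptr_data_sz tracks only what is actually populated. Back-of-the-envelope arithmetic with illustrative port and queue counts, assuming the usual 1024-queue build default:

#include <stdint.h>
#include <stdio.h>

#define RTE_MAX_QUEUES_PER_PORT 1024 /* common build-time default */

int main(void)
{
	uint16_t max_port_id = 1; /* two ethdev ports, illustrative */
	uint16_t nb_txq = 8;      /* Tx queues actually configured */

	size_t worst_case = sizeof(uint64_t) * (max_port_id + 1) *
			    RTE_MAX_QUEUES_PER_PORT;
	size_t sized_to_fit = sizeof(uint64_t) * (max_port_id + 1) * nb_txq;

	printf("per-HWS copy: %zu bytes worst case vs %zu needed\n",
	       worst_case, sized_to_fit);
	return 0;
}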
 
@@ -291,108 +279,114 @@ static void
 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-       const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
+       const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
+       const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
+       const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
+       const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
+       const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,
+
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
 #undef R
-               };
+       };
 
-       const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
+       const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
-                       NIX_RX_FASTPATH_MODES
+       const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_burst_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_burst_##name,
+               NIX_RX_FASTPATH_MODES
 #undef R
        };
 
        /* Tx modes */
        const event_tx_adapter_enqueue_t
-               sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
+               sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
 #undef T
                };
 
        const event_tx_adapter_enqueue_t
-               sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
+               sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
 #undef T
                };
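All of the tables above follow the same X-macro pattern: NIX_RX_FASTPATH_MODES (and NIX_TX_FASTPATH_MODES below) expands to one R(name, flags) entry per specialized function, and each table redefines R so that the flags value becomes a designated-initializer index. A reduced, self-contained model of the pattern, with made-up mode names:

#include <stdio.h>

/* Reduced model of the NIX_RX_FASTPATH_MODES list; names and flag
 * values are invented for illustration. */
#define FASTPATH_MODES                                                        \
	M(plain, 0)                                                           \
	M(rss, 1)                                                             \
	M(rss_csum, 3)

static void deq_plain(void)    { puts("plain"); }
static void deq_rss(void)      { puts("rss"); }
static void deq_rss_csum(void) { puts("rss_csum"); }

typedef void (*deq_fn_t)(void);

int main(void)
{
	/* Each table redefines the macro, expands the mode list into
	 * designated initializers, then undefines it - exactly the
	 * R(name, flags) pattern above. */
	const deq_fn_t ops[4] = {
#define M(name, flags) [flags] = deq_##name,
		FASTPATH_MODES
#undef M
	};

	ops[3](); /* dispatches to deq_rss_csum */
	return 0;
}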
@@ -418,6 +412,12 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_seg_burst);
                }
+               if (dev->is_timeout_deq && dev->is_ca_internal_port) {
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+                                              sso_hws_deq_tmo_ca_seg);
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+                                              sso_hws_deq_tmo_ca_seg_burst);
+               }
        } else {
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
@@ -434,6 +434,12 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_burst);
                }
+               if (dev->is_timeout_deq && dev->is_ca_internal_port) {
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+                                              sso_hws_deq_tmo_ca);
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+                                              sso_hws_deq_tmo_ca_burst);
+               }
        }
        event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;
 
@@ -711,21 +717,70 @@ cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
        return 0;
 }
 
+static void
+cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cn10k_eth_txq *txq;
+       struct roc_nix_sq *sq;
+       int i;
+
+       if (tx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+                       cn10k_sso_txq_fc_update(eth_dev, i);
+       } else {
+               uint16_t sqes_per_sqb;
+
+               sq = &cnxk_eth_dev->sqs[tx_queue_id];
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+               sq->nb_sqb_bufs_adj =
+                       sq->nb_sqb_bufs -
+                       RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+                               sqes_per_sqb;
+               if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+                                               (sqes_per_sqb - 1));
+               txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+               txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+       }
+}
+
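The flow-control math in cn10k_sso_txq_fc_update() can be checked by hand: ceil(nb_sqb_bufs / sqes_per_sqb) SQB-equivalents are subtracted (plausibly one SQE per SQB lost to next-buffer chaining), then only 70% of the remainder is kept as the threshold. A sketch with illustrative numbers, using a local macro that mirrors RTE_ALIGN_MUL_CEIL:

#include <stdint.h>
#include <stdio.h>

/* Mirrors RTE_ALIGN_MUL_CEIL: round a up to a multiple of b. */
#define ALIGN_MUL_CEIL(a, b) ((((a) + (b) - 1) / (b)) * (b))

int main(void)
{
	uint16_t nb_sqb_bufs = 512; /* illustrative SQB count */
	uint16_t sqes_per_sqb = 32; /* 1 << sqes_per_sqb_log2 */

	/* Subtract roughly one SQE per SQB, expressed in SQBs. */
	uint16_t adj = nb_sqb_bufs -
		       ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) /
			       sqes_per_sqb;

	/* Keep a 30% safety margin on the flow-control threshold. */
	adj = (70 * adj) / 100;

	printf("fc threshold: %u of %u SQBs\n", adj, nb_sqb_bufs);
	return 0;
}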
 static int
 cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t tx_queue_id)
 {
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint64_t tx_offloads;
        int rc;
 
        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
+
+       /* Can't enable tstamp if all the ports don't have it enabled. */
+       tx_offloads = cnxk_eth_dev->tx_offload_flags;
+       if (dev->tx_adptr_configured) {
+               uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
+               uint8_t tstmp_ena =
+                       !!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
+
+               if (tstmp_ena && !tstmp_req)
+                       dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
+               else if (!tstmp_ena && tstmp_req)
+                       tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
+       }
+
+       dev->tx_offloads |= tx_offloads;
+       cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+       dev->tx_adptr_configured = 1;
 
        return 0;
 }
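The timestamp reconciliation above makes NIX_TX_OFFLOAD_TSTAMP_F effectively an AND across all ports added to the adapter: once either side lacks it, it is cleared from the other before the flag sets are merged. A hypothetical condensed model of that rule:

#include <stdint.h>
#include <stdio.h>

#define TSTAMP_F (1ULL << 5) /* stand-in for NIX_TX_OFFLOAD_TSTAMP_F */

/* Fold a newly added port's Tx offload flags into the device-wide set:
 * timestamping survives only when both sides already have it. */
static uint64_t fold_tx_offloads(uint64_t dev_flags, uint64_t port_flags,
				 int adptr_configured)
{
	if (adptr_configured) {
		int req = !!(port_flags & TSTAMP_F); /* new port wants it */
		int ena = !!(dev_flags & TSTAMP_F);  /* device has it */

		if (ena && !req)
			dev_flags &= ~TSTAMP_F;
		else if (!ena && req)
			port_flags &= ~TSTAMP_F;
	}
	return dev_flags | port_flags;
}

int main(void)
{
	/* Adding a second port without timestamping drops it device-wide. */
	uint64_t flags = fold_tx_offloads(TSTAMP_F, 0, 1);

	printf("tstamp still enabled: %d\n", !!(flags & TSTAMP_F));
	return 0;
}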
@@ -905,4 +960,5 @@ RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
-                             CNXK_TIM_STATS_ENA "=1");
+                             CNXK_TIM_STATS_ENA "=1"
+                             CNXK_TIM_EXT_CLK "=<string>");