event/cnxk: store and reuse workslot status
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 237170d..f8652d4 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
        deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
 
 #define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
-       (enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
+       enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]
 
 static int
 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
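The hunk above replaces a seven-way [2]...[2] table lookup, one !!() test per
Tx offload flag, with a single masked index: the offload bitmask itself
addresses the table. The & (NIX_TX_OFFLOAD_MAX - 1) idiom implies that
NIX_TX_OFFLOAD_MAX is the power of two just above the last flag bit. A minimal
standalone sketch of the dispatch pattern (flag names and handlers below are
invented, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-flag offload space standing in for NIX_TX_OFFLOAD_*_F. */
#define OFF_CSUM (1 << 0)
#define OFF_VLAN (1 << 1)
#define OFF_MAX  (1 << 2)

static void enq_plain(void)     { puts("plain"); }
static void enq_csum(void)      { puts("csum"); }
static void enq_vlan(void)      { puts("vlan"); }
static void enq_csum_vlan(void) { puts("csum+vlan"); }

int main(void)
{
	/* One flat table: every flag combination owns one slot. */
	void (*const enq_ops[OFF_MAX])(void) = {
		[0]                   = enq_plain,
		[OFF_CSUM]            = enq_csum,
		[OFF_VLAN]            = enq_vlan,
		[OFF_CSUM | OFF_VLAN] = enq_csum_vlan,
	};
	uint64_t tx_offloads = OFF_CSUM | OFF_VLAN;

	/* Mask with (OFF_MAX - 1) to keep only the indexing bits, as
	 * enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)] does. */
	enq_ops[tx_offloads & (OFF_MAX - 1)]();
	return 0;
}

One mask thus replaces seven tests and the seven-dimensional address
arithmetic when the enqueue function is selected.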
@@ -265,17 +259,14 @@ cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
-                                       (sizeof(uint64_t) *
-                                        (dev->max_port_id + 1) *
-                                        RTE_MAX_QUEUES_PER_PORT),
+                                       dev->tx_adptr_data_sz,
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
-                              sizeof(uint64_t) * (dev->max_port_id + 1) *
-                                      RTE_MAX_QUEUES_PER_PORT);
+                              dev->tx_adptr_data_sz);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
@@ -286,17 +277,14 @@ cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
-                                       (sizeof(uint64_t) *
-                                        (dev->max_port_id + 1) *
-                                        RTE_MAX_QUEUES_PER_PORT),
+                                       dev->tx_adptr_data_sz,
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
-                              sizeof(uint64_t) * (dev->max_port_id + 1) *
-                                      RTE_MAX_QUEUES_PER_PORT);
+                              dev->tx_adptr_data_sz);
                        event_dev->data->ports[i] = ws;
                }
        }
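Both realloc sites in the two hunks above stop recomputing
sizeof(uint64_t) * (dev->max_port_id + 1) * RTE_MAX_QUEUES_PER_PORT inline and
instead reuse dev->tx_adptr_data_sz, a size recorded once where the adapter
data is built. A sketch of that compute-once, reuse-everywhere shape (only
tx_adptr_data and tx_adptr_data_sz come from the diff; the struct and function
around them are mocked up):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct dev_state {
	void *tx_adptr_data;
	size_t tx_adptr_data_sz; /* sized once where the data is built */
};

/* Grow one port's allocation to its private state plus the shared Tx
 * adapter data, then copy that data in: the shape of both realloc sites
 * in the hunks above. */
static void *port_attach_adptr_data(struct dev_state *dev, void *port,
				    size_t priv_sz)
{
	void *p = realloc(port, priv_sz + dev->tx_adptr_data_sz);

	if (p == NULL)
		return NULL;
	memcpy((char *)p + priv_sz, dev->tx_adptr_data,
	       dev->tx_adptr_data_sz);
	return p;
}

int main(void)
{
	uint64_t data[4] = { 1, 2, 3, 4 };
	struct dev_state dev = { data, sizeof(data) };
	void *port = port_attach_adptr_data(&dev, NULL, 64);

	free(port);
	return 0;
}

Storing the size beside the buffer also keeps the two in step if the adapter
data ever stops being the worst-case port-by-queue grid.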
@@ -504,30 +492,26 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
        };
 
        /* Tx modes */
-       const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
+       const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags) [flags] = cn9k_sso_hws_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
 #undef T
        };
 
-       const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
+       const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags) [flags] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
 #undef T
        };
 
-       const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
+       const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags) [flags] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
 #undef T
        };
 
-       const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
+       const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags) [flags] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
 #undef T
        };
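With the tables flattened, each T() entry of NIX_TX_FASTPATH_MODES now
supplies its flag combination as one value that becomes the
designated-initializer index directly, rather than seven 0/1 coordinates. A
toy version of the same X-macro pattern (the list, flag bits, and functions
are invented for illustration; sz is carried but unused, as in the tables
above):

#include <stdio.h>

#define F_CSUM (1 << 0)
#define F_VLAN (1 << 1)
#define F_MAX  (1 << 2)

static void tx_none(void)      { puts("none"); }
static void tx_csum(void)      { puts("csum"); }
static void tx_vlan(void)      { puts("vlan"); }
static void tx_csum_vlan(void) { puts("csum_vlan"); }

/* X-macro list of (name, sz, flags), standing in for NIX_TX_FASTPATH_MODES. */
#define TX_FASTPATH_MODES                                                      \
	T(none,      4, 0)                                                     \
	T(csum,      4, F_CSUM)                                                \
	T(vlan,      4, F_VLAN)                                                \
	T(csum_vlan, 4, F_CSUM | F_VLAN)

int main(void)
{
	/* T() expands each entry to "[flags] = tx_##name,": the flag
	 * word is the array index. */
	void (*const ops[F_MAX])(void) = {
#define T(name, sz, flags) [flags] = tx_##name,
		TX_FASTPATH_MODES
#undef T
	};

	ops[F_CSUM | F_VLAN](); /* prints "csum_vlan" */
	return 0;
}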
@@ -997,17 +981,36 @@ cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
 {
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint64_t tx_offloads;
        int rc;
 
        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
+
+       /* Keep tstamp enabled only if every added port has it enabled. */
+       tx_offloads = cnxk_eth_dev->tx_offload_flags;
+       if (dev->tx_adptr_configured) {
+               uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
+               uint8_t tstmp_ena =
+                       !!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
+
+               if (tstmp_ena && !tstmp_req)
+                       dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
+               else if (!tstmp_ena && tstmp_req)
+                       tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
+       }
+
+       dev->tx_offloads |= tx_offloads;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+       dev->tx_adptr_configured = 1;
 
        return 0;
 }
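The queue-add path now reconciles the timestamp offload across ports: once the
adapter is marked configured, NIX_TX_OFFLOAD_TSTAMP_F stays in the aggregate
only while every added port requests it; a port arriving without it clears the
flag, and a late request from a new port is ignored. A standalone sketch of
that rule (struct and bit position are mocked up; the branches mirror the hunk
above):

#include <assert.h>
#include <stdint.h>

#define TX_OFFLOAD_TSTAMP_F (1ULL << 3) /* illustrative bit position */

struct evdev {
	uint64_t tx_offloads;        /* aggregate across added ports */
	uint8_t tx_adptr_configured;
};

/* Fold one port's Tx offload flags into the aggregate: tstamp survives
 * only if the first and every later port enabled it. */
static void fold_offloads(struct evdev *dev, uint64_t port_offloads)
{
	if (dev->tx_adptr_configured) {
		uint8_t req = !!(port_offloads & TX_OFFLOAD_TSTAMP_F);
		uint8_t ena = !!(dev->tx_offloads & TX_OFFLOAD_TSTAMP_F);

		if (ena && !req)       /* new port lacks tstamp: drop it */
			dev->tx_offloads &= ~TX_OFFLOAD_TSTAMP_F;
		else if (!ena && req)  /* earlier ports lacked it: ignore */
			port_offloads &= ~TX_OFFLOAD_TSTAMP_F;
	}
	dev->tx_offloads |= port_offloads;
	dev->tx_adptr_configured = 1;
}

int main(void)
{
	struct evdev dev = { 0, 0 };

	fold_offloads(&dev, TX_OFFLOAD_TSTAMP_F);  /* first port: on */
	assert(dev.tx_offloads & TX_OFFLOAD_TSTAMP_F);
	fold_offloads(&dev, 0);                    /* port without tstamp */
	assert(!(dev.tx_offloads & TX_OFFLOAD_TSTAMP_F));
	fold_offloads(&dev, TX_OFFLOAD_TSTAMP_F);  /* late request ignored */
	assert(!(dev.tx_offloads & TX_OFFLOAD_TSTAMP_F));
	return 0;
}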