diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2431875..bf0a991 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -7,28 +7,18 @@
 #include "cnxk_worker.h"
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
-       (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
-                        [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])
+       deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
 
 #define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
-       (enq_op =                                                              \
-                enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]     \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
-                       [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
+       enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]
 
 static uint32_t
 cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
 {
-       uint32_t wdata = BIT(16) | 1;
+       uint32_t wdata = 1;
+
+       if (dev->deq_tmo_ns)
+               wdata |= BIT(16);
 
        switch (dev->gw_mode) {
        case CN10K_GW_MODE_NONE:
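
The two macros above replace seven nested [2] array dimensions with one flat table indexed by the offload bitmask itself, which works because the NIX Rx/Tx offload flags are consecutive single bits and NIX_RX_OFFLOAD_MAX / NIX_TX_OFFLOAD_MAX sit one past the last flag. A minimal sketch of the scheme, with illustrative flag names and handlers (none of these identifiers are from the driver):

#include <stdint.h>

/* Illustrative flags: each Rx offload is a single bit and RX_F_MAX is
 * the power of two just past the last flag, so (RX_F_MAX - 1) masks
 * off exactly the flag bits (stand-in for NIX_RX_OFFLOAD_MAX). */
#define RX_F_RSS  (1u << 0)
#define RX_F_CSUM (1u << 1)
#define RX_F_MAX  (1u << 2)

typedef void (*deq_fn_t)(void);

static void deq_plain(void) {}
static void deq_rss(void) {}
static void deq_csum(void) {}
static void deq_rss_csum(void) {}

/* One entry per flag combination; the offload bitmask is the index. */
static const deq_fn_t deq_ops[RX_F_MAX] = {
	[0] = deq_plain,
	[RX_F_RSS] = deq_rss,
	[RX_F_CSUM] = deq_csum,
	[RX_F_RSS | RX_F_CSUM] = deq_rss_csum,
};

/* Equivalent of CN10K_SET_EVDEV_DEQ_OP: mask, then index. */
static inline deq_fn_t
pick_deq(uint64_t rx_offloads)
{
	return deq_ops[rx_offloads & (RX_F_MAX - 1)];
}

Masking with (MAX - 1) keeps the index inside the table even if unrelated high bits are set in the offload word.
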
@@ -63,7 +53,6 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
        /* First cache line is reserved for cookie */
        ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
        ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
-       ws->tx_base = ws->base;
        ws->hws_id = port_id;
        ws->swtag_req = 0;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
@@ -91,19 +80,19 @@ cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
 }
 
 static void
-cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
 {
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uint64_t val;
 
-       rte_memcpy(ws->grps_base, grps_base,
-                  sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+       ws->grp_base = grp_base;
        ws->fc_mem = (uint64_t *)dev->fc_iova;
        ws->xaq_lmt = dev->xaq_lmt;
 
        /* Set get_work timeout for HWS */
-       val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+       val = NSEC2USEC(dev->deq_tmo_ns);
+       val = val ? val - 1 : 0;
        plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
 }
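
The timeout write above also fixes an underflow: with no dequeue timeout configured (deq_tmo_ns == 0), the old NSEC2USEC(dev->deq_tmo_ns) - 1 wrapped around and programmed a huge getwork timer. A worked example of the two computations (an integer ns-to-us conversion is assumed here; the driver's NSEC2USEC may differ in detail):

#include <inttypes.h>
#include <stdio.h>

#define NSEC2USEC(ns) ((ns) / 1000) /* illustrative integer conversion */

int
main(void)
{
	uint64_t deq_tmo_ns = 0; /* no dequeue timeout configured */

	/* Old: 0 - 1 wraps to UINT64_MAX and programs a huge timer. */
	uint64_t old_val = NSEC2USEC(deq_tmo_ns) - 1;

	/* New: clamp at 0 instead of underflowing. */
	uint64_t val = NSEC2USEC(deq_tmo_ns);
	val = val ? val - 1 : 0;

	printf("old=%" PRIu64 " new=%" PRIu64 "\n", old_val, val);
	return 0;
}
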
 
@@ -112,17 +101,18 @@ cn10k_sso_hws_release(void *arg, void *hws)
 {
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
-       int i;
+       uint16_t i;
 
        for (i = 0; i < dev->nb_event_queues; i++)
-               roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
+               roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
        memset(ws, 0, sizeof(*ws));
 }
 
-static void
+static int
 cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                           cnxk_handle_event_t fn, void *arg)
 {
+       uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
        struct cn10k_sso_hws *ws = hws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
@@ -144,13 +134,16 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 
        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
-               cn10k_sso_hws_get_work_empty(ws, &ev);
+               cn10k_sso_hws_get_work_empty(
+                       ws, &ev,
+                       (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+                               NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
-                       cnxk_sso_hws_swtag_flush(
-                               ws->base + SSOW_LF_GWS_WQE0,
-                               ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+                       cnxk_sso_hws_swtag_flush(ws->base);
+               else if (retry-- == 0)
+                       break;
                do {
                        val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
@@ -161,8 +154,13 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }
 
+       if (aq_cnt || cq_ds_cnt || ds_cnt)
+               return -EAGAIN;
+
        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
+
+       return 0;
 }
 
 static void
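
cn10k_sso_hws_flush_events() now carries a bounded retry budget: each empty getwork decrements retry, and once it hits zero the function bails out and reports -EAGAIN instead of spinning forever on a work set that never drains. The control-flow pattern, reduced to a sketch (the retry value and helper names here are illustrative, not CNXK_SSO_FLUSH_RETRY_MAX's real definition):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define FLUSH_RETRY_MAX 0xffff /* stand-in for CNXK_SSO_FLUSH_RETRY_MAX */

static bool poll_one_event(void) { return false; } /* illustrative */
static bool work_pending(void) { return true; }    /* illustrative */

static int
drain_events(void)
{
	uint64_t retry = FLUSH_RETRY_MAX;

	while (work_pending()) {
		if (poll_one_event())
			continue;       /* got an event; keep draining */
		if (retry-- == 0)
			return -EAGAIN; /* queue never drained; report it */
	}
	return 0;
}
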
@@ -177,15 +175,23 @@ cn10k_sso_hws_reset(void *arg, void *hws)
                uint64_t u64[2];
        } gw;
        uint8_t pend_tt;
+       bool is_pend;
 
        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        /* Wait till getwork/swtp/waitw/desched completes. */
+       is_pend = false;
+       /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+       pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+       if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+           ws->swtag_req)
+               is_pend = true;
+
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                               BIT_ULL(56) | BIT_ULL(54)));
        pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
-       if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+       if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
                        cnxk_sso_hws_swtag_untag(base +
                                                 SSOW_LF_GWS_OP_SWTAG_UNTAG);
@@ -199,15 +205,10 @@ cn10k_sso_hws_reset(void *arg, void *hws)
 
        switch (dev->gw_mode) {
        case CN10K_GW_MODE_PREF:
+       case CN10K_GW_MODE_PREF_WFE:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
                        ;
                break;
-       case CN10K_GW_MODE_PREF_WFE:
-               while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
-                      SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
-                       continue;
-               plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
-               break;
        case CN10K_GW_MODE_NONE:
        default:
                break;
@@ -273,15 +274,13 @@ cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
                        ws_cookie,
                        sizeof(struct cnxk_sso_hws_cookie) +
                                sizeof(struct cn10k_sso_hws) +
-                               (sizeof(uint64_t) * (dev->max_port_id + 1) *
-                                RTE_MAX_QUEUES_PER_PORT),
+                               dev->tx_adptr_data_sz,
                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                if (ws_cookie == NULL)
                        return -ENOMEM;
                ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
                memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
-                      sizeof(uint64_t) * (dev->max_port_id + 1) *
-                              RTE_MAX_QUEUES_PER_PORT);
+                      dev->tx_adptr_data_sz);
                event_dev->data->ports[i] = ws;
        }
 
@@ -292,108 +291,114 @@ static void
 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-       const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
+       const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
+       const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
+       const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
+       const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
+       const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
+       const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
 #undef R
-               };
+       };
 
-       const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
+       const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_burst_t
-               sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
-#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
-                       NIX_RX_FASTPATH_MODES
+       const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_burst_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_##name,
+               NIX_RX_FASTPATH_MODES
+#undef R
+       };
+
+       const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
+#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_burst_##name,
+               NIX_RX_FASTPATH_MODES
 #undef R
        };
 
        /* Tx modes */
        const event_tx_adapter_enqueue_t
-               sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
+               sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
 #undef T
                };
 
        const event_tx_adapter_enqueue_t
-               sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
-       [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
+               sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
+#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
 #undef T
                };
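
All of the tables above are built with the same X-macro trick: NIX_RX_FASTPATH_MODES / NIX_TX_FASTPATH_MODES expand to one R()/T() invocation per supported flag combination, and the locally defined R/T turns each into a designated initializer keyed by the flag bitmask. A self-contained miniature of the pattern (table and handler names are made up):

/* Miniature of the X-macro construction (all names made up). */
typedef void (*event_deq_t)(void);

static void deq_plain(void) {}
static void deq_rss(void) {}

#define MY_FASTPATH_MODES \
	R(plain, 0)       \
	R(rss, 1)

/* R() turns each mode into a designated initializer keyed by flags. */
#define R(name, flags) [flags] = deq_##name,
static const event_deq_t table[2] = { MY_FASTPATH_MODES };
#undef R
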
@@ -419,6 +424,12 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_seg_burst);
                }
+               if (dev->is_timeout_deq && dev->is_ca_internal_port) {
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+                                              sso_hws_deq_tmo_ca_seg);
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+                                              sso_hws_deq_tmo_ca_seg_burst);
+               }
        } else {
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
@@ -435,6 +446,12 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_burst);
                }
+               if (dev->is_timeout_deq && dev->is_ca_internal_port) {
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+                                              sso_hws_deq_tmo_ca);
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+                                              sso_hws_deq_tmo_ca_burst);
+               }
        }
        event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;
 
@@ -527,6 +544,66 @@ free:
        rte_free(gws_cookie);
 }
 
+static void
+cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+                      rte_eventdev_port_flush_t flush_cb, void *args)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       struct cn10k_sso_hws *ws = port;
+       struct rte_event ev;
+       uint64_t ptag;
+       bool is_pend;
+
+       is_pend = false;
+       /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+       ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+       if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req)
+               is_pend = true;
+       do {
+               ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+       } while (ptag &
+                (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54)));
+
+       cn10k_sso_hws_get_work_empty(ws, &ev,
+                                    (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+                                            NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+       if (is_pend && ev.u64) {
+               if (flush_cb)
+                       flush_cb(event_dev->data->dev_id, ev, args);
+               cnxk_sso_hws_swtag_flush(ws->base);
+       }
+
+       /* Check if we have work in PRF_WQE0, if so extract it. */
+       switch (dev->gw_mode) {
+       case CN10K_GW_MODE_PREF:
+       case CN10K_GW_MODE_PREF_WFE:
+               while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) &
+                      BIT_ULL(63))
+                       ;
+               break;
+       case CN10K_GW_MODE_NONE:
+       default:
+               break;
+       }
+
+       if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) !=
+           SSO_TT_EMPTY) {
+               plt_write64(BIT_ULL(16) | 1,
+                           ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+               cn10k_sso_hws_get_work_empty(
+                       ws, &ev,
+                       (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+                               NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+               if (ev.u64) {
+                       if (flush_cb)
+                               flush_cb(event_dev->data->dev_id, ev, args);
+                       cnxk_sso_hws_swtag_flush(ws->base);
+               }
+       }
+       ws->swtag_req = 0;
+       plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+}
+
 static int
 cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                    const uint8_t queues[], const uint8_t priorities[],
@@ -712,21 +789,70 @@ cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
        return 0;
 }
 
+static void
+cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cn10k_eth_txq *txq;
+       struct roc_nix_sq *sq;
+       int i;
+
+       if (tx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+                       cn10k_sso_txq_fc_update(eth_dev, i);
+       } else {
+               uint16_t sqes_per_sqb;
+
+               sq = &cnxk_eth_dev->sqs[tx_queue_id];
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+               sq->nb_sqb_bufs_adj =
+                       sq->nb_sqb_bufs -
+                       RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+                               sqes_per_sqb;
+               if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+                       sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+                                               (sqes_per_sqb - 1));
+               txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+               txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+       }
+}
+
 static int
 cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t tx_queue_id)
 {
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint64_t tx_offloads;
        int rc;
 
        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
+
+       /* Can't enable tstamp if all the ports don't have it enabled. */
+       tx_offloads = cnxk_eth_dev->tx_offload_flags;
+       if (dev->tx_adptr_configured) {
+               uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
+               uint8_t tstmp_ena =
+                       !!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
+
+               if (tstmp_ena && !tstmp_req)
+                       dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
+               else if (!tstmp_ena && tstmp_req)
+                       tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
+       }
+
+       dev->tx_offloads |= tx_offloads;
+       cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+       dev->tx_adptr_configured = 1;
 
        return 0;
 }
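
cn10k_sso_txq_fc_update() above rebuilds the Tx queue's SQB flow-control threshold: reserve roughly one SQE's worth of space per SQB, optionally subtract security descriptors, then keep 70% of the remainder as headroom. Worked numbers for one hypothetical queue (the security-offload adjustment is omitted):

#include <stdint.h>

int
main(void)
{
	uint16_t sqes_per_sqb = 1u << 5; /* 32 SQEs per SQB */
	uint64_t nb_sqb_bufs = 512;

	/* RTE_ALIGN_MUL_CEIL(512, 32) / 32 == ceil(512 / 32) == 16:
	 * reserve one SQE's worth of space per SQB. */
	uint64_t adj = nb_sqb_bufs -
		       (nb_sqb_bufs + sqes_per_sqb - 1) / sqes_per_sqb;
	/* adj = 512 - 16 = 496 */

	/* Keep 70% of the remainder as the flow-control threshold. */
	adj = (70 * adj) / 100; /* 347 */
	(void)adj;
	return 0;
}
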
@@ -791,12 +917,17 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 static struct eventdev_ops cn10k_sso_dev_ops = {
        .dev_infos_get = cn10k_sso_info_get,
        .dev_configure = cn10k_sso_dev_configure,
+
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
+       .queue_attr_get = cnxk_sso_queue_attribute_get,
+       .queue_attr_set = cnxk_sso_queue_attribute_set,
+
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn10k_sso_port_setup,
        .port_release = cn10k_sso_port_release,
+       .port_quiesce = cn10k_sso_port_quiesce,
        .port_link = cn10k_sso_port_link,
        .port_unlink = cn10k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,
@@ -881,9 +1012,11 @@ static const struct rte_pci_id cn10k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
@@ -906,4 +1039,5 @@ RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
-                             CNXK_TIM_STATS_ENA "=1");
+                             CNXK_TIM_STATS_ENA "=1"
+                             CNXK_TIM_EXT_CLK "=<string>");