drivers/event/cnxk/cn9k_worker.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#include "cn9k_cryptodev_ops.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

/* SSO Operations */

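/* Enqueue a new event to the SSO group given by ev->queue_id. The
 * xaq_lmt/fc_mem check is XAQ buffer flow control: when the in-flight
 * count has reached the limit, nothing is queued and 0 is returned so
 * the caller can retry.
 */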
static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
        return 1;
}

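/* Forward an event within the current group by switching the tag
 * and/or tag type in place (SWTAG); no deschedule is needed.
 */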
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
                       const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));

        /* CNXK model
         * cur_tt/new_tt       SSO_TT_ORDERED   SSO_TT_ATOMIC   SSO_TT_UNTAGGED
         *
         * SSO_TT_ORDERED           norm             norm            untag
         * SSO_TT_ATOMIC            norm             norm            untag
         * SSO_TT_UNTAGGED          norm             norm            NOOP
         */

        if (new_tt == SSO_TT_UNTAGGED) {
                if (cur_tt != SSO_TT_UNTAGGED)
                        cnxk_sso_hws_swtag_untag(
                                CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
                                SSOW_LF_GWS_OP_SWTAG_UNTAG);
        } else {
                cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
        }
}

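/* Forward an event to another group: update the work-queue pointer and
 * group via SSOW_LF_GWS_OP_UPD_WQP_GRP1, then SWTAG_DESCHED so the SSO
 * re-arbitrates the event to the new group.
 */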
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
                       const struct rte_event *ev, const uint16_t grp)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;

        plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
                                     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
        cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}

static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
                ws->swtag_req = 1;
        } else {
                /*
                 * The group has changed for group-based work pipelining;
                 * use the deschedule/add_work operation to transfer the
                 * event to the new group/core.
                 */
                cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
                                       grp);
        }
}

/* Dual workslot ops. */

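/* A dual workslot port owns two GWS workslots and ping-pongs between
 * them: while the event taken from one workslot is being processed,
 * GET_WORK is already armed on its pair, so the next event is usually
 * ready without stalling.
 */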
static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
                            const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (dws->xaq_lmt <= *dws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
        return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
                                struct cn9k_sso_hws_state *vws,
                                const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag(vws, ev);
                dws->swtag_req = 1;
        } else {
                /*
                 * The group has changed for group-based work pipelining;
                 * use the deschedule/add_work operation to transfer the
                 * event to the new group/core.
                 */
                cn9k_sso_hws_fwd_group(vws, ev, grp);
        }
}

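/* Convert a received CQE/WQE into an mbuf. mbuf_init is the mbuf rearm
 * word: data_off = RTE_PKTMBUF_HEADROOM (plus 8 bytes reserved for the
 * timestamp when NIX_RX_OFFLOAD_TSTAMP_F is set), refcnt = 1,
 * nb_segs = 1; the port id is OR'd into bits 63:48.
 */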
static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
                 const uint32_t tag, const uint32_t flags,
                 const void *const lookup_mem)
{
        const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                                   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

        cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
                             (struct rte_mbuf *)mbuf, lookup_mem,
                             mbuf_init | ((uint64_t)port_id) << 48, flags);
}

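/* Dual-workslot dequeue: poll this workslot's GWS_TAG until bit 63
 * (GET_WORK pending) clears, then arm GET_WORK on the pair workslot
 * (BIT(16) = wait for work, 1 = use mask set 0) before translating the
 * received event.
 */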
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
                           struct cn9k_sso_hws_state *ws_pair,
                           struct rte_event *ev, const uint32_t flags,
                           const void *const lookup_mem,
                           struct cnxk_timesync_info *const tstamp)
{
        const uint64_t set_gw = BIT_ULL(16) | 1;
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "rty%=:                                    \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   str %[gw], [%[pong]]            \n"
                     "          dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     "          prfm pldl1keep, [%[mbuf]]       \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
                       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);
        gw.u64[1] = plt_read64(ws->wqp_op);
        plt_write64(set_gw, ws_pair->getwrk_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

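        /* Rearrange the GWS_TAG word into rte_event layout: HW tag type
         * (bits 33:32) moves to sched_type (bits 39:38), HW group
         * (bits 45:36) moves to queue_id (bits 49:40); the tag itself
         * stays in bits 31:0.
         */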
        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if ((flags & CPT_RX_WQE_F) &&
                    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                     RTE_EVENT_TYPE_CRYPTODEV)) {
                        gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
                } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                           RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract the timestamp if PTP is enabled */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
                      const uint32_t flags, const void *const lookup_mem)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        plt_write64(BIT_ULL(16) | /* Wait for work. */
                            1,    /* Use mask set 0. */
                    ws->getwrk_op);

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbz %[tag], 63, done%=          \n"
                     "          sevl                            \n"
                     "rty%=:    wfe                             \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     "          prfm pldl1keep, [%[mbuf]]       \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if ((flags & CPT_RX_WQE_F) &&
                    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                     RTE_EVENT_TYPE_CRYPTODEV)) {
                        gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
                } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                           RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract the timestamp if PTP is enabled */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
                                                ws->tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* Used when cleaning up the workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbz %[tag], 63, done%=          \n"
                     "          sevl                            \n"
                     "rty%=:    wfe                             \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, 0, NULL);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
                                          const struct rte_event ev[],
                                          uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
                                         const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[],
                                       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
                                            uint16_t nb_events);

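/* NIX_RX_FASTPATH_MODES expands R() once per supported combination of
 * Rx offload flags (f0..f5), declaring a specialized dequeue function
 * for each: plain, timeout (tmo), crypto adapter (ca) and multi-segment
 * (seg) variants, in scalar and burst forms.
 */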
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name(                         \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name(                   \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name(                     \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name(               \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name(                    \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name(                \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name(          \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

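/* Wait for room in the SQB pool: the free SQE count, derived from
 * nb_sqb_bufs_adj minus the in-use count in *fc_mem shifted by
 * sqes_per_sqb_log2, must be non-zero before submitting.
 */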
static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
        while (!((txq->nb_sqb_bufs_adj -
                  __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
                 << (txq)->sqes_per_sqb_log2))
                ;
}

static __rte_always_inline const struct cn9k_eth_txq *
cn9k_sso_hws_xtract_meta(struct rte_mbuf *m,
                         const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
        return (const struct cn9k_eth_txq *)
                txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

static __rte_always_inline void
cn9k_sso_hws_prepare_pkt(const struct cn9k_eth_txq *txq, struct rte_mbuf *m,
                         uint64_t *cmd, const uint32_t flags)
{
        roc_lmt_mov(cmd, txq->cmd, cn9k_nix_tx_ext_subs(flags));
        cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
}

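/* Transmit the mbuf carried in ev. For tag type 0 (ordered) events the
 * LMT line is prepared first, then roc_sso_hws_head_wait() blocks until
 * this workslot reaches the head of its order group before the submit,
 * preserving packet order; other tag types are submitted immediately.
 */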
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
                      const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
                      const uint32_t flags)
{
        struct rte_mbuf *m = ev->mbuf;
        const struct cn9k_eth_txq *txq;
        uint16_t ref_cnt = m->refcnt;

        /* Perform header writes before the barrier for TSO */
        cn9k_nix_xmit_prepare_tso(m, flags);
        /* Commit any changes to the packet here, in case fast free is
         * set, as no further changes will be made to the mbuf. When
         * fast free is not set, both cn9k_nix_prepare_mseg() and
         * cn9k_nix_xmit_prepare() have a barrier after the refcnt
         * update.
         */
        if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
                rte_io_wmb();
        txq = cn9k_sso_hws_xtract_meta(m, txq_data);
        cn9k_sso_hws_prepare_pkt(txq, m, cmd, flags);

        if (flags & NIX_TX_MULTI_SEG_F) {
                const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
                if (!CNXK_TT_FROM_EVENT(ev->event)) {
                        cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
                        roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
                        cn9k_sso_txq_fc_wait(txq);
                        if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
                                cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
                                                       txq->io_addr, segdw);
                } else {
                        cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr,
                                               segdw);
                }
        } else {
                if (!CNXK_TT_FROM_EVENT(ev->event)) {
                        cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
                        roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
                        cn9k_sso_txq_fc_wait(txq);
                        if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
                                cn9k_nix_xmit_one(cmd, txq->lmt_addr,
                                                  txq->io_addr, flags);
                } else {
                        cn9k_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
                                          flags);
                }
        }

        if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
                if (ref_cnt > 1)
                        return 1;
        }

        cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
                                 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

        return 1;
}

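/* NIX_TX_FASTPATH_MODES expands T() once per supported combination of
 * Tx offload flags, declaring Tx adapter enqueue functions in single
 * and dual workslot, full-packet and multi-segment (seg) forms.
 */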
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
                void *port, struct rte_event ev[], uint16_t nb_events);        \
        uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
                void *port, struct rte_event ev[], uint16_t nb_events);        \
        uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events);        \
        uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
                void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif