/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include <rte_eventdev.h>
#include <rte_vect.h>

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#include "cn9k_cryptodev_ops.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

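/* Enqueue a NEW event to the SSO. Returns 0 when the XAQ flow-control
 * threshold has been reached and the event cannot be added, 1 on success.
 */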
static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      ws->grp_base + (grp << 12));
	return 1;
}

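/* Forward an event within the same group by switching the tag/tag type on the
 * current work slot. This is a no-op when both the current and the new tag
 * types are UNTAGGED.
 */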
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(uint64_t base, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt =
		CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG));

	/*
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm           untag
	 * SSO_TT_ATOMIC         norm           norm           untag
	 * SSO_TT_UNTAGGED       norm           norm           NOOP
	 */
	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt,
					base + SSOW_LF_GWS_OP_SWTAG_NORM);
	}
}

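/* Forward an event to a different group: update the work queue pointer and
 * deschedule so the SSO can reschedule the event to the new group.
 */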
static __rte_always_inline void
cn9k_sso_hws_fwd_group(uint64_t base, const struct rte_event *ev,
		       const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
				   base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}

static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_TAG)) == grp) {
		cn9k_sso_hws_fwd_swtag(ws->base, ev);
		ws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn9k_sso_hws_fwd_group(ws->base, ev, grp);
	}
}

static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
			    const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (dws->xaq_lmt <= *dws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      dws->grp_base + (grp << 12));
	return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
				const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) == grp) {
		cn9k_sso_hws_fwd_swtag(base, ev);
		dws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn9k_sso_hws_fwd_group(base, ev, grp);
	}
}

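/* Convert a received NIX WQE/CQE into an rte_mbuf, encoding the Rx port id in
 * the upper bits (48..63) of the mbuf initializer word.
 */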
static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		 const uint32_t tag, const uint32_t flags,
		 const void *const lookup_mem)
{
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			     (struct rte_mbuf *)mbuf, lookup_mem,
			     mbuf_init | ((uint64_t)port_id) << 48, flags);
}

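/* Dual workslot get_work: consume the event pending on 'base' and, once its
 * tag is valid, issue the next GETWORK to the paired workslot ('pair_base')
 * so the two GWS ping-pong.
 */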
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
			   struct rte_event *ev, const uint32_t flags,
			   const void *const lookup_mem,
			   struct cnxk_timesync_info *const tstamp)
{
	const uint64_t set_gw = BIT_ULL(16) | 1;
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "rty%=:					\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	str %[gw], [%[pong]]		\n"
		     "		dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     "		prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
		       [wqp_loc] "r"(base + SSOW_LF_GWS_WQP), [gw] "r"(set_gw),
		       [pong] "r"(pair_base + SSOW_LF_GWS_OP_GET_WORK0));
#else
	gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
	gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
	plt_write64(set_gw, pair_base + SSOW_LF_GWS_OP_GET_WORK0);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, flags,
					 lookup_mem);
			/* Extract the timestamp if PTP is enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

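/* Single workslot get_work: issue GETWORK, poll the tag until it is valid and
 * translate the result into an rte_event (with mbuf fix-up for ethdev events).
 */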
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
		      const uint32_t flags, const void *const lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	plt_write64(BIT_ULL(16) | /* wait for work. */
			    1,    /* Use Mask set 0. */
		    ws->base + SSOW_LF_GWS_OP_GET_WORK0);

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     "		prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_TAG),
		       [wqp_loc] "r"(ws->base + SSOW_LF_GWS_WQP));
#else
	gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);

	gw.u64[1] = plt_read64(ws->base + SSOW_LF_GWS_WQP);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, flags,
					 lookup_mem);
			/* Extract the timestamp if PTP is enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
		       [wqp_loc] "r"(base + SSOW_LF_GWS_WQP));
#else
	gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);

	gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
					  const struct rte_event ev[],
					  uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
					 const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[],
				       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
					    uint16_t nb_events);

#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                           \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                     \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                      \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                  \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(            \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                  \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(            \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(              \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(        \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name(               \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

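/* Wait until the Tx queue has at least one free SQE (SQB-based flow control). */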
static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
	while (!((txq->nb_sqb_bufs_adj -
		  __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
		 << (txq)->sqes_per_sqb_log2))
		;
}

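/* Resolve the Tx queue for an mbuf from the Tx adapter's per-port/per-queue
 * lookup table.
 */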
static __rte_always_inline const struct cn9k_eth_txq *
cn9k_sso_hws_xtract_meta(struct rte_mbuf *m,
			 const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (const struct cn9k_eth_txq *)
		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

static __rte_always_inline void
cn9k_sso_hws_prepare_pkt(const struct cn9k_eth_txq *txq, struct rte_mbuf *m,
			 uint64_t *cmd, const uint32_t flags)
{
	roc_lmt_mov(cmd, txq->cmd, cn9k_nix_tx_ext_subs(flags));
	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
}

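/* Outbound inline IPsec: build a CPT instruction for the packet and submit it
 * through the LMT line (ARM64 fast path; stubbed out on other architectures).
 */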
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline void
cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
			  struct rte_mbuf *m, uint64_t *cmd,
			  const uint32_t flags)
{
	struct cn9k_outb_priv_data *outb_priv;
	rte_iova_t io_addr = txq->cpt_io_addr;
	uint64_t *lmt_addr = txq->lmt_addr;
	struct cn9k_sec_sess_priv mdata;
	struct nix_send_hdr_s *send_hdr;
	uint64_t sa_base = txq->sa_base;
	uint32_t pkt_len, dlen_adj, rlen;
	uint64x2_t cmd01, cmd23;
	uint64_t lmt_status, sa;
	union nix_send_sg_s *sg;
	uintptr_t dptr, nixtx;
	uint64_t ucode_cmd[4];
	uint64_t esn, *iv;
	uint8_t l2_len;

	mdata.u64 = *rte_security_dynfield(m);
	send_hdr = (struct nix_send_hdr_s *)cmd;
	if (flags & NIX_TX_NEED_EXT_HDR)
		sg = (union nix_send_sg_s *)&cmd[4];
	else
		sg = (union nix_send_sg_s *)&cmd[2];

	if (flags & NIX_TX_NEED_SEND_HDR_W1)
		l2_len = cmd[1] & 0xFF;
	else
		l2_len = m->l2_len;

	/* Retrieve DPTR and the current packet length */
	dptr = *(uint64_t *)(sg + 1);
	pkt_len = send_hdr->w0.total;

	/* Calculate the rounded-up IPsec payload length */
	rlen = pkt_len - l2_len;
	rlen = (rlen + mdata.roundup_len) + (mdata.roundup_byte - 1);
	rlen &= ~(uint64_t)(mdata.roundup_byte - 1);
	rlen += mdata.partial_len;
	dlen_adj = rlen - pkt_len + l2_len;

	/* Update send descriptors. Security is single segment only */
	send_hdr->w0.total = pkt_len + dlen_adj;
	sg->seg1_size = pkt_len + dlen_adj;

	/* Get area where NIX descriptor needs to be stored */
	nixtx = dptr + pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);

	roc_lmt_mov((void *)(nixtx + 16), cmd, cn9k_nix_tx_ext_subs(flags));

	/* Load opcode and cptr already prepared at pkt metadata set */
	pkt_len -= l2_len;
	pkt_len += sizeof(struct roc_onf_ipsec_outb_hdr) +
		   ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);

	sa = (uintptr_t)roc_nix_inl_onf_ipsec_outb_sa(sa_base, mdata.sa_idx);
	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
	ucode_cmd[0] = (ROC_IE_ONF_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 |
			0x40UL << 48 | pkt_len);

	/* CPT Word 0 and Word 1 */
	cmd01 = vdupq_n_u64((nixtx + 16) | (cn9k_nix_tx_ext_subs(flags) + 1));
	/* CPT_RES_S is 16B above NIXTX */
	cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);

	/* CPT word 2 and 3 */
	cmd23 = vdupq_n_u64(0);
	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) |
				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);

	dptr += l2_len - ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ -
		sizeof(struct roc_onf_ipsec_outb_hdr);
	ucode_cmd[1] = dptr;
	ucode_cmd[2] = dptr;

	/* Update IV to zero and l2 sz */
	*(uint16_t *)(dptr + sizeof(struct roc_onf_ipsec_outb_hdr)) =
		rte_cpu_to_be_16(ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ);
	iv = (uint64_t *)(dptr + 8);
	iv[0] = 0;
	iv[1] = 0;

	/* Head wait if needed */
	if (base)
		roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);

	/* ESN */
	outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
	esn = outb_priv->esn;
	outb_priv->esn = esn + 1;

	ucode_cmd[0] |= (esn >> 32) << 16;
	esn = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));

	/* Update ESN and IPID and IV */
	*(uint64_t *)dptr = esn << 32 | esn;

	rte_io_wmb();
	cn9k_sso_txq_fc_wait(txq);

	/* Write CPT instruction to lmt line */
	vst1q_u64(lmt_addr, cmd01);
	vst1q_u64(lmt_addr + 2, cmd23);

	roc_lmt_mov_seg(lmt_addr + 4, ucode_cmd, 2);

	if (roc_lmt_submit_ldeor(io_addr) == 0) {
		do {
			vst1q_u64(lmt_addr, cmd01);
			vst1q_u64(lmt_addr + 2, cmd23);
			roc_lmt_mov_seg(lmt_addr + 4, ucode_cmd, 2);

			lmt_status = roc_lmt_submit_ldeor(io_addr);
		} while (lmt_status == 0);
	}
}
#else
static __rte_always_inline void
cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
			  struct rte_mbuf *m, uint64_t *cmd,
			  const uint32_t flags)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(base);
	RTE_SET_USED(m);
	RTE_SET_USED(cmd);
	RTE_SET_USED(flags);
}
#endif

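/* Tx adapter send: prepare the packet, handle inline IPsec if requested, then
 * transmit either through the LMT path (with SSO head and Tx queue
 * flow-control waits) or directly.
 */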
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
		      const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		      const uint32_t flags)
{
	struct rte_mbuf *m = ev->mbuf;
	const struct cn9k_eth_txq *txq;
	uint16_t ref_cnt = m->refcnt;

	/* Perform header writes before barrier for TSO */
	cn9k_nix_xmit_prepare_tso(m, flags);
	/* Lets commit any changes in the packet here in case when
	 * fast free is set as no further changes will be made to mbuf.
	 * In case fast free is not set, both cn9k_nix_prepare_mseg()
	 * and cn9k_nix_xmit_prepare() have a barrier after the refcnt update.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
	    !(flags & NIX_TX_OFFLOAD_SECURITY_F))
		rte_io_wmb();
	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
	cn9k_sso_hws_prepare_pkt(txq, m, cmd, flags);

	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
		uint64_t ol_flags = m->ol_flags;

		if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			uintptr_t ssow_base = base;

			if (ev->sched_type)
				ssow_base = 0;

			cn9k_sso_hws_xmit_sec_one(txq, ssow_base, m, cmd,
						  flags);
			goto done;
		}

		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
			rte_io_wmb();
	}

	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
		if (!CNXK_TT_FROM_EVENT(ev->event)) {
			cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
			roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
			cn9k_sso_txq_fc_wait(txq);
			if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
				cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
						       txq->io_addr, segdw);
		} else {
			cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr,
					       segdw);
		}
	} else {
		if (!CNXK_TT_FROM_EVENT(ev->event)) {
			cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
			roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
			cn9k_sso_txq_fc_wait(txq);
			if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
				cn9k_nix_xmit_one(cmd, txq->lmt_addr,
						  txq->io_addr, flags);
		} else {
			cn9k_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
					  flags);
		}
	}

done:
	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
				 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}

#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                        \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif /* __CN9K_WORKER_H__ */