/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"
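
/*
 * SSO worker (HWS) fastpath: event enqueue/dequeue, Rx/Tx/crypto adapter
 * enqueue and per-offload function pointer selection for the OCTEON TX
 * SSOVF event device.
 */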
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
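
/*
 * Forward within the same group: issue the tag switch picked from the
 * cur_tt/new_tt transition table below and mark it pending through
 * ws->swtag_req so the next dequeue waits for the switch to complete.
 */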
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm            untag
	 * SSO_SYNC_ATOMIC         norm           norm            untag
	 * SSO_SYNC_UNTAGGED       full           full            NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
				new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}
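
/*
 * Reserved event type used to build a unique tag when an ordered event
 * is forwarded to a different group (see ssows_fwd_group() below).
 */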
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed; use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has been changed for group based work pipelining;
	 * use deschedule/add_work operation to transfer the event to
	 * the new group/core
	 */
		ssows_fwd_group(ws, ev, grp);
}
static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}
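
/*
 * R() stamps out one set of dequeue functions per combination of Rx
 * offload flags (f2/f1/f0); ssovf_fastpath_fns_set() below indexes the
 * generated function tables by those same flags.
 */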
#define R(name, f2, f1, f0, flags) \
static uint16_t __rte_noinline __rte_hot \
ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks) \
{ \
	struct ssows *ws = port; \
	RTE_SET_USED(timeout_ticks); \
	if (ws->swtag_req) { \
		ws->swtag_req = 0; \
		ssows_swtag_wait(ws); \
		return 1; \
	} else { \
		return ssows_get_work(ws, ev, flags); \
	} \
} \
 \
static uint16_t __rte_hot \
ssows_deq_burst_ ##name(void *port, struct rte_event ev[], \
			uint16_t nb_events, uint64_t timeout_ticks) \
{ \
	RTE_SET_USED(nb_events); \
	return ssows_deq_ ##name(port, ev, timeout_ticks); \
} \
 \
static uint16_t __rte_hot \
ssows_deq_timeout_ ##name(void *port, struct rte_event *ev, \
			  uint64_t timeout_ticks) \
{ \
	struct ssows *ws = port; \
	uint64_t iter; \
	uint16_t ret = 1; \
	if (ws->swtag_req) { \
		ws->swtag_req = 0; \
		ssows_swtag_wait(ws); \
	} else { \
		ret = ssows_get_work(ws, ev, flags); \
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
			ret = ssows_get_work(ws, ev, flags); \
	} \
	return ret; \
} \
 \
static uint16_t __rte_hot \
ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[], \
				uint16_t nb_events, uint64_t timeout_ticks) \
{ \
	RTE_SET_USED(nb_events); \
	return ssows_deq_timeout_ ##name(port, ev, timeout_ticks); \
}

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
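
/*
 * Single-event enqueue: NEW adds fresh work, FORWARD either tag-switches
 * within the group or deschedules and re-adds the work when the group
 * changed, and RELEASE drops the currently held tag.
 */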
__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}
uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return ssows_enq(port, ev);
}
uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t i;
	struct ssows *ws = port;

	rte_smp_wmb();
	for (i = 0; i < nb_events; i++)
		ssows_new_event(ws, &ev[i]);

	return nb_events;
}
uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	RTE_SET_USED(nb_events);

	ssows_forward_event(ws, ev);

	return 1;
}
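
/*
 * Drain all pending work from the given event queue (group), passing
 * each flushed event to fn(). Loops until both the admission queue and
 * the conflict/deschedule queues read back empty.
 */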
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
		ssows_handle_event_t fn, void *arg)
{
	uint32_t reg_off;
	struct rte_event ev;
	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;
	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;

		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

		sched_type_queue = (get_work0 >> 32) & 0xfff;
		ws->cur_tt = sched_type_queue & 0x3;
		ws->cur_grp = sched_type_queue >> 2;
		sched_type_queue = sched_type_queue << 38;
		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
					(ev.event >> 20) & 0x7F,
					OCCTX_RX_OFFLOAD_NONE |
					OCCTX_RX_MULTI_SEG_F,
					ws->lookup_mem);
		else
			ev.u64 = get_work1;

		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
	}
}
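
/*
 * Quiesce the worker port: complete a pending tag switch by
 * descheduling the event, or drop any held ORDERED/ATOMIC tag.
 */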
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}
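
/*
 * Tx adapter enqueue: switch the event to ATOMIC first so packets of a
 * flow are transmitted in order, then hand the mbuf to the octeontx
 * ethdev Tx routine.
 */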
static __rte_always_inline uint16_t
__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
			       uint16_t nb_events, uint64_t *cmd,
			       const uint16_t flag)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;

	RTE_SET_USED(nb_events);
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		rte_io_wmb();
		break;
	}

	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];

	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
}
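
/*
 * T() stamps out one Tx adapter enqueue function per offload-flag
 * combination; sz sizes the command buffer the Tx routine needs.
 */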
#define T(name, f3, f2, f1, f0, sz, flags) \
static uint16_t __rte_noinline __rte_hot \
sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[], \
				      uint16_t nb_events) \
{ \
	uint64_t cmd[sz]; \
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd, \
					      flags); \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
static uint16_t __rte_hot
ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
			    uint16_t nb_events)
{
	RTE_SET_USED(nb_events);

	return otx_crypto_adapter_enqueue(port, ev->event_ptr);
}
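
/*
 * Select the fastpath implementations at device setup: each table
 * generated by the R()/T() expansions above is indexed by the enabled
 * Rx/Tx offload flags, so the hot path only pays for enabled features.
 */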
void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;

	dev->ca_enqueue = ssow_crypto_adapter_enqueue;

	const event_tx_adapter_enqueue_t ssow_txa_enqueue[2][2][2][2] = {
#define T(name, f3, f2, f1, f0, sz, flags) \
	[f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
	};

	dev->txa_enqueue = ssow_txa_enqueue
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];

	dev->txa_enqueue_same_dest = dev->txa_enqueue;

	/* Assigning dequeue func pointers */
	const event_dequeue_t ssow_deq[2][2][2] = {
#define R(name, f2, f1, f0, flags) \
	[f2][f1][f0] = ssows_deq_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	dev->dequeue = ssow_deq
		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

	const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags) \
	[f2][f1][f0] = ssows_deq_burst_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
	};

	dev->dequeue_burst = ssow_deq_burst
		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

	if (edev->is_timeout_deq) {
		const event_dequeue_t ssow_deq_timeout[2][2][2] = {
#define R(name, f2, f1, f0, flags) \
	[f2][f1][f0] = ssows_deq_timeout_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
		};

		dev->dequeue = ssow_deq_timeout
			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

		const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags) \
	[f2][f1][f0] = ssows_deq_timeout_burst_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
		};

		dev->dequeue_burst = ssow_deq_timeout_burst
			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
	}
}
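
/*
 * Precompute the Rx ol_flags lookup table: each (errlev, errcode) pair
 * reported by the HW parser maps to a set of RTE_MBUF_F_RX_* checksum
 * flags, so the Rx hot path resolves offloads with a single load.
 */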
static void
octeontx_create_rx_ol_flags_array(void *mem)
{
	uint16_t idx, errcode, errlev;
	uint32_t val, *ol_flags;

	/* Skip ptype array memory */
	ol_flags = (uint32_t *)mem;

	for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
		errcode = idx & 0xff;
		errlev = (idx & 0x700) >> 8;

		val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
		val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
		val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;

		switch (errlev) {
		case OCCTX_ERRLEV_RE:
			if (errcode) {
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			} else {
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			}
			break;
		case OCCTX_ERRLEV_LC:
			if (errcode == OCCTX_EC_IP4_CSUM) {
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
			} else {
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			}
			break;
		case OCCTX_ERRLEV_LD:
			/* Check if parsed packet is neither IPv4 nor IPv6 */
			if (errcode == OCCTX_EC_IP4_NOT)
				break;
			val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (errcode == OCCTX_EC_L4_CSUM)
				val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			break;
		case OCCTX_ERRLEV_LE:
			if (errcode == OCCTX_EC_IP4_CSUM)
				val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			break;
		case OCCTX_ERRLEV_LF:
			/* Check if parsed packet is neither IPv4 nor IPv6 */
			if (errcode == OCCTX_EC_IP4_NOT)
				break;
			val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			if (errcode == OCCTX_EC_L4_CSUM)
				val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			else
				val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			break;
		}

		ol_flags[idx] = val;
	}
}
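
/*
 * Return the shared fastpath lookup memzone, reserving and populating
 * it on first use so every port references a single copy.
 */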
void *
octeontx_fastpath_lookup_mem_get(void)
{
	const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
	const struct rte_memzone *mz;
	void *mem;

	mz = rte_memzone_lookup(name);
	if (mz != NULL)
		return mz->addr;

	/* Request for the first time */
	mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
					 SOCKET_ID_ANY, 0, OCCTX_ALIGN);
	if (mz != NULL) {
		mem = mz->addr;
		/* Form the rx ol_flags based on errcode */
		octeontx_create_rx_ol_flags_array(mem);
		return mem;
	}
	return NULL;
}