X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx%2Fssovf_worker.c;h=a9149fb9343d734a2f3515d44ea110e6534171a9;hb=300b796262a1;hp=d8bbc7149f7d0b1b1c1362eba15c81b81ce9586c;hpb=8384f0e039eae47cb1b951b2db41400a80fbaeb7;p=dpdk.git

diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index d8bbc7149f..a9149fb934 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -91,59 +91,64 @@ ssows_release_event(struct ssows *ws)
 	ssows_swtag_untag(ws);
 }
 
-__rte_always_inline uint16_t __hot
-ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct ssows *ws = port;
-
-	RTE_SET_USED(timeout_ticks);
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		ssows_swtag_wait(ws);
-		return 1;
-	} else {
-		return ssows_get_work(ws, ev);
-	}
+#define R(name, f2, f1, f0, flags) \
+static uint16_t __rte_noinline __rte_hot \
+ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+{ \
+	struct ssows *ws = port; \
+ \
+	RTE_SET_USED(timeout_ticks); \
+ \
+	if (ws->swtag_req) { \
+		ws->swtag_req = 0; \
+		ssows_swtag_wait(ws); \
+		return 1; \
+	} else { \
+		return ssows_get_work(ws, ev, flags); \
+	} \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_burst_ ##name(void *port, struct rte_event ev[], \
+		uint16_t nb_events, uint64_t timeout_ticks) \
+{ \
+	RTE_SET_USED(nb_events); \
+ \
+	return ssows_deq_ ##name(port, ev, timeout_ticks); \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_timeout_ ##name(void *port, struct rte_event *ev, \
+		uint64_t timeout_ticks) \
+{ \
+	struct ssows *ws = port; \
+	uint64_t iter; \
+	uint16_t ret = 1; \
+ \
+	if (ws->swtag_req) { \
+		ws->swtag_req = 0; \
+		ssows_swtag_wait(ws); \
+	} else { \
+		ret = ssows_get_work(ws, ev, flags); \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+			ret = ssows_get_work(ws, ev, flags); \
+	} \
+	return ret; \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[], \
+		uint16_t nb_events, uint64_t timeout_ticks) \
+{ \
+	RTE_SET_USED(nb_events); \
+ \
+	return ssows_deq_timeout_ ##name(port, ev, timeout_ticks); \
 }
 
-__rte_always_inline uint16_t __hot
-ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct ssows *ws = port;
-	uint64_t iter;
-	uint16_t ret = 1;
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		ssows_swtag_wait(ws);
-	} else {
-		ret = ssows_get_work(ws, ev);
-		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-			ret = ssows_get_work(ws, ev);
-	}
-	return ret;
-}
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
 
-uint16_t __hot
-ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
-		uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return ssows_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __hot
-ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
-		uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return ssows_deq_timeout(port, ev, timeout_ticks);
-}
-
-__rte_always_inline uint16_t __hot
+__rte_always_inline uint16_t __rte_hot
 ssows_enq(void *port, const struct rte_event *ev)
 {
 	struct ssows *ws = port;
@@ -166,14 +171,14 @@ ssows_enq(void *port, const struct rte_event *ev)
 	return ret;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
 {
 	RTE_SET_USED(nb_events);
 	return ssows_enq(port, ev);
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
 {
 	uint16_t i;
@@ -186,7 +191,7 @@ ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
 	return nb_events;
 }
 
-uint16_t __hot
+uint16_t __rte_hot
 ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
 {
 	struct ssows *ws = port;
@@ -204,6 +209,8 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 	uint32_t reg_off;
 	struct rte_event ev;
 	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+	uint64_t get_work0, get_work1;
+	uint64_t sched_type_queue;
 	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
 
 	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
@@ -219,7 +226,23 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
 		/* Extract cq and ds count */
 		cq_ds_cnt &= 0x1FFF1FFF0000;
-		ssows_get_work(ws, &ev);
+
+		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+
+		sched_type_queue = (get_work0 >> 32) & 0xfff;
+		ws->cur_tt = sched_type_queue & 0x3;
+		ws->cur_grp = sched_type_queue >> 2;
+		sched_type_queue = sched_type_queue << 38;
+		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
+		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
+			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+					(ev.event >> 20) & 0x7F,
+					OCCTX_RX_OFFLOAD_NONE |
+					OCCTX_RX_MULTI_SEG_F,
+					ws->lookup_mem);
+		else
+			ev.u64 = get_work1;
+
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 	}
@@ -246,3 +269,227 @@ ssows_reset(struct ssows *ws)
 		ssows_swtag_untag(ws);
 	}
 }
+
+static __rte_always_inline uint16_t
+__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t *cmd,
+		const uint16_t flag)
+{
+	uint16_t port_id;
+	uint16_t queue_id;
+	struct rte_mbuf *m;
+	struct rte_eth_dev *ethdev;
+	struct ssows *ws = port;
+	struct octeontx_txq *txq;
+
+	RTE_SET_USED(nb_events);
+	switch (ev->sched_type) {
+	case SSO_SYNC_ORDERED:
+		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
+		rte_io_wmb();
+		ssows_swtag_wait(ws);
+		break;
+	case SSO_SYNC_UNTAGGED:
+		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
+				ev->queue_id);
+		rte_io_wmb();
+		ssows_swtag_wait(ws);
+		break;
+	case SSO_SYNC_ATOMIC:
+		rte_io_wmb();
+		break;
+	}
+
+	m = ev[0].mbuf;
+	port_id = m->port;
+	queue_id = rte_event_eth_tx_adapter_txq_get(m);
+	ethdev = &rte_eth_devices[port_id];
+	txq = ethdev->data->tx_queues[queue_id];
+
+	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+}
+
+#define T(name, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[], \
+		uint16_t nb_events) \
+{ \
+	uint64_t cmd[sz]; \
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd, \
+			flags); \
+}

+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+	dev->enqueue = ssows_enq;
+	dev->enqueue_burst = ssows_enq_burst;
+	dev->enqueue_new_burst = ssows_enq_new_burst;
+	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+
+	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+#define T(name, f3, f2, f1, f0, sz, flags) \
+	[f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	dev->txa_enqueue = ssow_txa_enqueue
+		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
+		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
+		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
+
+	dev->txa_enqueue_same_dest = dev->txa_enqueue;
+
+	/* Assigning dequeue func pointers */
+	const event_dequeue_t ssow_deq[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+	[f2][f1][f0] = ssows_deq_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	dev->dequeue = ssow_deq
+		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+	const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+	[f2][f1][f0] = ssows_deq_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	dev->dequeue_burst = ssow_deq_burst
+		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+	if (edev->is_timeout_deq) {
+		const event_dequeue_t ssow_deq_timeout[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+		[f2][f1][f0] = ssows_deq_timeout_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+		};
+
+		dev->dequeue = ssow_deq_timeout
+			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+		const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+		[f2][f1][f0] = ssows_deq_timeout_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+		};
+
+		dev->dequeue_burst = ssow_deq_timeout_burst
+			[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+	}
+}
+
+static void
+octeontx_create_rx_ol_flags_array(void *mem)
+{
+	uint16_t idx, errcode, errlev;
+	uint32_t val, *ol_flags;
+
+	/* Skip ptype array memory */
+	ol_flags = (uint32_t *)mem;
+
+	for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
+		errcode = idx & 0xff;
+		errlev = (idx & 0x700) >> 8;
+
+		val = PKT_RX_IP_CKSUM_UNKNOWN;
+		val |= PKT_RX_L4_CKSUM_UNKNOWN;
+		val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+
+		switch (errlev) {
+		case OCCTX_ERRLEV_RE:
+			if (errcode) {
+				val |= PKT_RX_IP_CKSUM_BAD;
+				val |= PKT_RX_L4_CKSUM_BAD;
+			} else {
+				val |= PKT_RX_IP_CKSUM_GOOD;
+				val |= PKT_RX_L4_CKSUM_GOOD;
+			}
+			break;
+		case OCCTX_ERRLEV_LC:
+			if (errcode == OCCTX_EC_IP4_CSUM) {
+				val |= PKT_RX_IP_CKSUM_BAD;
+				val |= PKT_RX_OUTER_IP_CKSUM_BAD;
+			} else {
+				val |= PKT_RX_IP_CKSUM_GOOD;
+			}
+			break;
+		case OCCTX_ERRLEV_LD:
+			/* Check if parsed packet is neither IPv4 nor IPv6 */
+			if (errcode == OCCTX_EC_IP4_NOT)
+				break;
+			val |= PKT_RX_IP_CKSUM_GOOD;
+			if (errcode == OCCTX_EC_L4_CSUM)
+				val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+			else
+				val |= PKT_RX_L4_CKSUM_GOOD;
+			break;
+		case OCCTX_ERRLEV_LE:
+			if (errcode == OCCTX_EC_IP4_CSUM)
+				val |= PKT_RX_IP_CKSUM_BAD;
+			else
+				val |= PKT_RX_IP_CKSUM_GOOD;
+			break;
+		case OCCTX_ERRLEV_LF:
+			/* Check if parsed packet is neither IPv4 nor IPv6 */
+			if (errcode == OCCTX_EC_IP4_NOT)
+				break;
+			val |= PKT_RX_IP_CKSUM_GOOD;
+			if (errcode == OCCTX_EC_L4_CSUM)
+				val |= PKT_RX_L4_CKSUM_BAD;
+			else
+				val |= PKT_RX_L4_CKSUM_GOOD;
+			break;
+		}
+
+		ol_flags[idx] = val;
+	}
+}
+
+void *
+octeontx_fastpath_lookup_mem_get(void)
+{
+	const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
+	const struct rte_memzone *mz;
+	void *mem;
+
+	mz = rte_memzone_lookup(name);
+	if (mz != NULL)
+		return mz->addr;
+
+	/* Request for the first time */
+	mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
+			SOCKET_ID_ANY, 0, OCCTX_ALIGN);
+	if (mz != NULL) {
+		mem = mz->addr;
+		/* Form the rx ol_flags based on errcode */
+		octeontx_create_rx_ol_flags_array(mem);
+		return mem;
+	}
+	return NULL;
+}
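
A note on the R/T macro pattern above, for readers new to it: SSO_RX_ADPTR_ENQ_FASTPATH_FUNC and SSO_TX_ADPTR_ENQ_FASTPATH_FUNC are X-macro lists. Each expansion of R() stamps out one dequeue variant whose flags argument is a compile-time constant, so the per-flag work inside ssows_get_work() can be folded away per variant, and ssovf_fastpath_fns_set() then selects a variant once, at configure time, by indexing a [2][2][2] table with !!(offload flag) booleans. Below is a minimal, compilable sketch of that technique only; the DEMO_* flags, the 2x2 matrix, and the toy dequeue body are hypothetical stand-ins, not the driver's real list.

#include <stdint.h>
#include <stdio.h>

#define DEMO_OFFLOAD_CSUM_F (1u << 0) /* hypothetical flag */
#define DEMO_MULTI_SEG_F    (1u << 1) /* hypothetical flag */

typedef uint16_t (*demo_deq_t)(uint64_t work);

/* X-macro list: name, f1 (multi-seg index), f0 (csum index), flags */
#define DEMO_FASTPATH_FUNC \
R(no_offload, 0, 0, 0) \
R(csum,       0, 1, DEMO_OFFLOAD_CSUM_F) \
R(mseg,       1, 0, DEMO_MULTI_SEG_F) \
R(csum_mseg,  1, 1, DEMO_OFFLOAD_CSUM_F | DEMO_MULTI_SEG_F)

/* Stamp out one specialized function per flag combination. */
#define R(name, f1, f0, flags) \
static uint16_t demo_deq_##name(uint64_t work) \
{ \
	/* 'flags' is a compile-time constant in each variant, so \
	 * these branches are folded away by the compiler. */ \
	if ((flags) & DEMO_OFFLOAD_CSUM_F) \
		work |= 1; /* stand-in for checksum ol_flags work */ \
	if ((flags) & DEMO_MULTI_SEG_F) \
		work |= 2; /* stand-in for segment-chain walk */ \
	return (uint16_t)work; \
}
DEMO_FASTPATH_FUNC
#undef R

int main(void)
{
	/* The same expansion builds the dispatch table. */
	const demo_deq_t deq[2][2] = {
#define R(name, f1, f0, flags) [f1][f0] = demo_deq_##name,
DEMO_FASTPATH_FUNC
#undef R
	};
	uint32_t dev_flags = DEMO_OFFLOAD_CSUM_F; /* e.g. from device config */

	/* Mirrors the !!(flags & ...) double-indexing done in
	 * ssovf_fastpath_fns_set(). */
	demo_deq_t fn = deq[!!(dev_flags & DEMO_MULTI_SEG_F)]
			   [!!(dev_flags & DEMO_OFFLOAD_CSUM_F)];
	printf("result=%u\n", (unsigned)fn(0));
	return 0;
}

The payoff of this shape is that the hot path carries no flag tests at all: the cost of offload configurability is paid once when the function pointers are assigned, not per packet.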
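The lookup-memory half of the patch applies the same precompute-once idea at table scale: octeontx_create_rx_ol_flags_array() fills one 32-bit ol_flags word for every (errlev, errcode) combination, indexed as (errlev << 8) | errcode across BIT(ERRCODE_ERRLEN_WIDTH) entries, so the Rx fast path turns hardware error bits into mbuf flags with a single load. A standalone sketch of that technique, with hypothetical DEMO_* values standing in for the PKT_RX_* bits and a toy good/bad policy instead of the driver's per-errlev switch:

#include <stdint.h>
#include <stdio.h>

#define DEMO_ERR_BITS 11                 /* 3-bit errlev + 8-bit errcode */
#define DEMO_TBL_SZ   (1u << DEMO_ERR_BITS)

#define DEMO_IP_CKSUM_GOOD (1u << 0)     /* hypothetical flag values */
#define DEMO_IP_CKSUM_BAD  (1u << 1)
#define DEMO_L4_CKSUM_GOOD (1u << 2)
#define DEMO_L4_CKSUM_BAD  (1u << 3)

static uint32_t demo_ol_flags[DEMO_TBL_SZ];

/* Init path: run once, like octeontx_create_rx_ol_flags_array(). */
static void
demo_build_ol_flags_table(void)
{
	uint32_t idx;

	for (idx = 0; idx < DEMO_TBL_SZ; idx++) {
		uint8_t errcode = idx & 0xff;
		uint8_t errlev = (idx >> 8) & 0x7;
		uint32_t val;

		/* Toy policy: errlev 0 with errcode 0 means both checksums
		 * verified; anything else is marked bad. */
		if (errlev == 0 && errcode == 0)
			val = DEMO_IP_CKSUM_GOOD | DEMO_L4_CKSUM_GOOD;
		else
			val = DEMO_IP_CKSUM_BAD | DEMO_L4_CKSUM_BAD;

		demo_ol_flags[idx] = val;
	}
}

/* Fast path: one table load replaces a switch on errlev/errcode. */
static inline uint32_t
demo_errbits_to_ol_flags(uint16_t hw_err_bits)
{
	return demo_ol_flags[hw_err_bits & (DEMO_TBL_SZ - 1)];
}

int main(void)
{
	demo_build_ol_flags_table();
	printf("clean=0x%x errored=0x%x\n",
	       (unsigned)demo_errbits_to_ol_flags(0x000),
	       (unsigned)demo_errbits_to_ol_flags(0x123));
	return 0;
}

In the driver the table lives in a named rte_memzone rather than a static array, which is why octeontx_fastpath_lookup_mem_get() tries rte_memzone_lookup() first: a process that finds the zone already reserved reuses the prebuilt table instead of rebuilding it.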