ssows_swtag_untag(ws);
}
-__rte_always_inline uint16_t __rte_hot
-ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct ssows *ws = port;
-
- RTE_SET_USED(timeout_ticks);
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- ssows_swtag_wait(ws);
- return 1;
- } else {
- return ssows_get_work(ws, ev);
- }
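+/*
+ * R() stamps out one specialized Rx fastpath per offload flag
+ * combination: a single-event dequeue, its burst wrapper, and timeout
+ * variants of both. The f2/f1/f0 arguments give the bit positions used
+ * to index the function pointer tables in ssovf_fastpath_fns_set()
+ * below. As an illustration (assuming the fastpath table in the worker
+ * header carries an entry such as R(mseg, 0, 0, 1, OCCTX_RX_MULTI_SEG_F)),
+ * the expansion generates ssows_deq_mseg(), ssows_deq_burst_mseg(),
+ * ssows_deq_timeout_mseg() and ssows_deq_timeout_burst_mseg(), each
+ * calling ssows_get_work() with its flags resolved at compile time.
+ */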
+#define R(name, f2, f1, f0, flags) \
+static uint16_t __rte_noinline __rte_hot \
+ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+{ \
+ struct ssows *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ ssows_swtag_wait(ws); \
+ return 1; \
+ } else { \
+ return ssows_get_work(ws, ev, flags); \
+ } \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return ssows_deq_ ##name(port, ev, timeout_ticks); \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_timeout_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct ssows *ws = port; \
+ uint64_t iter; \
+ uint16_t ret = 1; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ ssows_swtag_wait(ws); \
+ } else { \
+ ret = ssows_get_work(ws, ev, flags); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = ssows_get_work(ws, ev, flags); \
+ } \
+ return ret; \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return ssows_deq_timeout_ ##name(port, ev, timeout_ticks); \
}
-__rte_always_inline uint16_t __rte_hot
-ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct ssows *ws = port;
- uint64_t iter;
- uint16_t ret = 1;
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- ssows_swtag_wait(ws);
- } else {
- ret = ssows_get_work(ws, ev);
- for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
- ret = ssows_get_work(ws, ev);
- }
- return ret;
-}
-
-uint16_t __rte_hot
-ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
- uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return ssows_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
- uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return ssows_deq_timeout(port, ev, timeout_ticks);
-}
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
ev.event = sched_type_queue | (get_work0 & 0xffffffff);
if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
- (ev.event >> 20) & 0x7F);
+ (ev.event >> 20) & 0x7F,
+ OCCTX_RX_OFFLOAD_NONE |
+ OCCTX_RX_MULTI_SEG_F,
+ ws->lookup_mem);
else
ev.u64 = get_work1;
}
}
-uint16_t
-sso_event_tx_adapter_enqueue(void *port,
- struct rte_event ev[], uint16_t nb_events)
+static __rte_always_inline uint16_t
+__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t *cmd,
+ const uint16_t flag)
{
uint16_t port_id;
uint16_t queue_id;
struct rte_eth_dev *ethdev;
struct ssows *ws = port;
struct octeontx_txq *txq;
- uint64_t cmd[4];
RTE_SET_USED(nb_events);
switch (ev->sched_type) {
case SSO_SYNC_ORDERED:
ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
- rte_cio_wmb();
+ rte_io_wmb();
ssows_swtag_wait(ws);
break;
case SSO_SYNC_UNTAGGED:
ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
ev->queue_id);
- rte_cio_wmb();
+ rte_io_wmb();
ssows_swtag_wait(ws);
break;
case SSO_SYNC_ATOMIC:
- rte_cio_wmb();
+ rte_io_wmb();
break;
}
ethdev = &rte_eth_devices[port_id];
txq = ethdev->data->tx_queues[queue_id];
- return __octeontx_xmit_pkts(txq, &m, 1, cmd, OCCTX_TX_OFFLOAD_NONE);
+ return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+}
+
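+/*
+ * T() generates one Tx adapter enqueue function per Tx offload flag
+ * combination. The sz argument sizes the on-stack command buffer that
+ * __octeontx_xmit_pkts() fills with the send descriptor, so each
+ * variant reserves only the words its offload set needs. As an
+ * illustration (assuming an entry such as
+ * T(no_offload, 0, 0, 0, 0, 4, OCCTX_TX_OFFLOAD_NONE) in the fastpath
+ * table), this yields sso_event_tx_adapter_enqueue_no_offload() with a
+ * uint64_t cmd[4].
+ */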
+#define T(name, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+{ \
+ uint64_t cmd[sz]; \
+ return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd, \
+ flags); \
+}
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
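+/*
+ * Crypto adapter enqueue: the event carries a crypto op in
+ * ev->event_ptr, which is handed to the octeontx crypto PMD one op at
+ * a time; nb_events is therefore unused.
+ */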
+static uint16_t __rte_hot
+ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
+ uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+
+ return otx_crypto_adapter_enqueue(port, ev->event_ptr);
+}
+
+void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev->enqueue = ssows_enq;
+ dev->enqueue_burst = ssows_enq_burst;
+ dev->enqueue_new_burst = ssows_enq_new_burst;
+ dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+
+ dev->ca_enqueue = ssow_crypto_adapter_enqueue;
+
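+ /*
+ * Build the Tx enqueue table from the T() template; every
+ * [f3][f2][f1][f0] slot holds the function generated for that offload
+ * flag combination, so the runtime flag tests below pick the matching
+ * specialization.
+ */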
+ const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+#define T(name, f3, f2, f1, f0, sz, flags) \
+ [f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+ };
+
+ dev->txa_enqueue = ssow_txa_enqueue
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
+
+ dev->txa_enqueue_same_dest = dev->txa_enqueue;
+
+ /* Assign dequeue function pointers based on the Rx offload flags */
+ const event_dequeue_t ssow_deq[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue = ssow_deq
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+ const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue_burst = ssow_deq_burst
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
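+ /*
+ * When the device is configured with a dequeue timeout, the timeout
+ * variants generated above replace the plain dequeue handlers.
+ */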
+ if (edev->is_timeout_deq) {
+ const event_dequeue_t ssow_deq_timeout[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_timeout_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue = ssow_deq_timeout
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+ const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_timeout_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue_burst = ssow_deq_timeout_burst
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+ }
+}
+
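+/*
+ * Precompute mbuf ol_flags for every (errlev, errcode) pair the Rx
+ * work-queue entry can report. The index packs errcode in bits [7:0]
+ * and errlev in bits [10:8], letting the Rx fastpath translate
+ * hardware error bits into ol_flags with a single 32-bit table load
+ * per packet.
+ */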
+static void
+octeontx_create_rx_ol_flags_array(void *mem)
+{
+ uint16_t idx, errcode, errlev;
+ uint32_t val, *ol_flags;
+
+ /* The Rx ol_flags array starts at the base of the lookup memory */
+ ol_flags = (uint32_t *)mem;
+
+ for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
+ errcode = idx & 0xff;
+ errlev = (idx & 0x700) >> 8;
+
+ val = PKT_RX_IP_CKSUM_UNKNOWN;
+ val |= PKT_RX_L4_CKSUM_UNKNOWN;
+ val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+
+ switch (errlev) {
+ case OCCTX_ERRLEV_RE:
+ if (errcode) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ break;
+ case OCCTX_ERRLEV_LC:
+ if (errcode == OCCTX_EC_IP4_CSUM) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ }
+ break;
+ case OCCTX_ERRLEV_LD:
+ /* Check if the parsed packet is neither IPv4 nor IPv6 */
+ if (errcode == OCCTX_EC_IP4_NOT)
+ break;
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ if (errcode == OCCTX_EC_L4_CSUM)
+ val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ else
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case OCCTX_ERRLEV_LE:
+ if (errcode == OCCTX_EC_IP4_CSUM)
+ val |= PKT_RX_IP_CKSUM_BAD;
+ else
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case OCCTX_ERRLEV_LF:
+ /* Check if the parsed packet is neither IPv4 nor IPv6 */
+ if (errcode == OCCTX_EC_IP4_NOT)
+ break;
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ if (errcode == OCCTX_EC_L4_CSUM)
+ val |= PKT_RX_L4_CKSUM_BAD;
+ else
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ }
+
+ ol_flags[idx] = val;
+ }
+}
+
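+/*
+ * The lookup table lives in a named memzone: the first caller reserves
+ * and populates it, and any later caller (another port, or a secondary
+ * process) resolves to the same shared copy via rte_memzone_lookup().
+ */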
+void *
+octeontx_fastpath_lookup_mem_get(void)
+{
+ const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
+ const struct rte_memzone *mz;
+ void *mem;
+
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ /* First request: reserve and populate the lookup memory */
+ mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
+ SOCKET_ID_ANY, 0, OCCTX_ALIGN);
+ if (mz != NULL) {
+ mem = mz->addr;
+ /* Form the Rx ol_flags based on errcode */
+ octeontx_create_rx_ol_flags_array(mem);
+ return mem;
+ }
+ return NULL;
}