-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium networks Ltd. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include "ssovf_worker.h"
ssows_swtag_untag(ws);
}
-__rte_always_inline uint16_t __hot
-ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct ssows *ws = port;
-
- RTE_SET_USED(timeout_ticks);
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- ssows_swtag_wait(ws);
- return 1;
- } else {
- return ssows_get_work(ws, ev);
- }
-}
-
-__rte_always_inline uint16_t __hot
-ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct ssows *ws = port;
- uint64_t iter;
- uint16_t ret = 1;
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- ssows_swtag_wait(ws);
- } else {
- ret = ssows_get_work(ws, ev);
- for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
- ret = ssows_get_work(ws, ev);
- }
- return ret;
-}
-
-uint16_t __hot
-ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
- uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return ssows_deq(port, ev, timeout_ticks);
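+/* ssows_get_work() is now parameterized by Rx offload flags, so a dedicated
+ * dequeue, dequeue-burst and dequeue-timeout handler is generated per flag
+ * combination; SSO_RX_ADPTR_ENQ_FASTPATH_FUNC expands R() once for each one.
+ */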
+#define R(name, f2, f1, f0, flags) \
+static uint16_t __rte_noinline __rte_hot \
+ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+{ \
+ struct ssows *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ ssows_swtag_wait(ws); \
+ return 1; \
+ } else { \
+ return ssows_get_work(ws, ev, flags); \
+ } \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return ssows_deq_ ##name(port, ev, timeout_ticks); \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_timeout_ ##name(void *port, struct rte_event *ev, \
+ uint64_t timeout_ticks) \
+{ \
+ struct ssows *ws = port; \
+ uint64_t iter; \
+ uint16_t ret = 1; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ ssows_swtag_wait(ws); \
+ } else { \
+ ret = ssows_get_work(ws, ev, flags); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = ssows_get_work(ws, ev, flags); \
+ } \
+ return ret; \
+} \
+ \
+static uint16_t __rte_hot \
+ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[], \
+ uint16_t nb_events, uint64_t timeout_ticks) \
+{ \
+ RTE_SET_USED(nb_events); \
+ \
+ return ssows_deq_timeout_ ##name(port, ev, timeout_ticks); \
}
-uint16_t __hot
-ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
- uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return ssows_deq_timeout(port, ev, timeout_ticks);
-}
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
-__rte_always_inline uint16_t __hot
+__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
{
struct ssows *ws = port;
return ret;
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
RTE_SET_USED(nb_events);
return ssows_enq(port, ev);
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
uint16_t i;
return nb_events;
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
struct ssows *ws = port;
}
void
-ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg)
{
uint32_t reg_off;
- uint64_t aq_cnt = 1;
- uint64_t cq_ds_cnt = 1;
- uint64_t enable, get_work0, get_work1;
- uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+ struct rte_event ev;
+ uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+ uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
enable = ssovf_read64(base + SSO_VHGRP_QCTL);
if (!enable)
cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
/* Extract cq and ds count */
cq_ds_cnt &= 0x1FFF1FFF0000;
+
ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
- }
- RTE_SET_USED(get_work0);
- RTE_SET_USED(get_work1);
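+ /* get_work0 bits [43:32] carry the tag type (low 2 bits) and the
+ * group; shift them into the sched_type/queue_id position of
+ * ev.event and merge the low 32 bits (the tag value).
+ */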
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+ ev.event = sched_type_queue | (get_work0 & 0xffffffff);
+ if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
+ ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+ (ev.event >> 20) & 0x7F,
+ OCCTX_RX_OFFLOAD_NONE |
+ OCCTX_RX_MULTI_SEG_F,
+ ws->lookup_mem);
+ else
+ ev.u64 = get_work1;
+
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
+ }
}
void
ssows_swtag_untag(ws);
}
}
+
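+/* Common Tx adapter enqueue body: switch the event to ATOMIC context first so
+ * that ORDERED/UNTAGGED flows are transmitted in order, then hand the mbuf to
+ * the ethdev Tx queue selected by the Tx adapter.
+ */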
+static __rte_always_inline uint16_t
+__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t *cmd,
+ const uint16_t flag)
+{
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_mbuf *m;
+ struct rte_eth_dev *ethdev;
+ struct ssows *ws = port;
+ struct octeontx_txq *txq;
+
+ RTE_SET_USED(nb_events);
+ switch (ev->sched_type) {
+ case SSO_SYNC_ORDERED:
+ ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
+ rte_io_wmb();
+ ssows_swtag_wait(ws);
+ break;
+ case SSO_SYNC_UNTAGGED:
+ ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
+ ev->queue_id);
+ rte_io_wmb();
+ ssows_swtag_wait(ws);
+ break;
+ case SSO_SYNC_ATOMIC:
+ rte_io_wmb();
+ break;
+ }
+
+ m = ev[0].mbuf;
+ port_id = m->port;
+ queue_id = rte_event_eth_tx_adapter_txq_get(m);
+ ethdev = &rte_eth_devices[port_id];
+ txq = ethdev->data->tx_queues[queue_id];
+
+ return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+}
+
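+/* Generate one Tx adapter enqueue handler per Tx offload flag combination;
+ * sz sizes the command buffer (in 64-bit words) each variant builds for
+ * __octeontx_xmit_pkts().
+ */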
+#define T(name, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+{ \
+ uint64_t cmd[sz]; \
+ return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd, \
+ flags); \
+}
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev->enqueue = ssows_enq;
+ dev->enqueue_burst = ssows_enq_burst;
+ dev->enqueue_new_burst = ssows_enq_new_burst;
+ dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+
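+ /* Jump table of generated Tx handlers, indexed by the individual
+ * Tx offload flag bits.
+ */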
+ const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+#define T(name, f3, f2, f1, f0, sz, flags) \
+ [f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+ };
+
+ dev->txa_enqueue = ssow_txa_enqueue
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
+
+ dev->txa_enqueue_same_dest = dev->txa_enqueue;
+
+ /* Assign dequeue function pointers based on the Rx offload flags */
+ const event_dequeue_t ssow_deq[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue = ssow_deq
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+ const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue_burst = ssow_deq_burst
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
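+ /* If a dequeue timeout was configured, override the dequeue handlers
+ * with the variants that poll until timeout_ticks expires.
+ */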
+ if (edev->is_timeout_deq) {
+ const event_dequeue_t ssow_deq_timeout[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_timeout_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue = ssow_deq_timeout
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+ const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_timeout_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+ };
+
+ dev->dequeue_burst = ssow_deq_timeout_burst
+ [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+ }
+}
+
+static void
+octeontx_create_rx_ol_flags_array(void *mem)
+{
+ uint16_t idx, errcode, errlev;
+ uint32_t val, *ol_flags;
+
+ /* The Rx ol_flags array sits at the start of the lookup memory */
+ ol_flags = (uint32_t *)mem;
+
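+ /* Each index packs errlev (bits [10:8]) and errcode (bits [7:0]);
+ * precompute the mbuf ol_flags for every combination.
+ */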
+ for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
+ errcode = idx & 0xff;
+ errlev = (idx & 0x700) >> 8;
+
+ val = PKT_RX_IP_CKSUM_UNKNOWN;
+ val |= PKT_RX_L4_CKSUM_UNKNOWN;
+ val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+
+ switch (errlev) {
+ case OCCTX_ERRLEV_RE:
+ if (errcode) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ break;
+ case OCCTX_ERRLEV_LC:
+ if (errcode == OCCTX_EC_IP4_CSUM) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_EIP_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ }
+ break;
+ case OCCTX_ERRLEV_LD:
+ /* Check if parsed packet is neither IPv4 nor IPv6 */
+ if (errcode == OCCTX_EC_IP4_NOT)
+ break;
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ if (errcode == OCCTX_EC_L4_CSUM)
+ val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ else
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case OCCTX_ERRLEV_LE:
+ if (errcode == OCCTX_EC_IP4_CSUM)
+ val |= PKT_RX_IP_CKSUM_BAD;
+ else
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case OCCTX_ERRLEV_LF:
+ /* Check if parsed packet is neither IPv4 nor IPv6 */
+ if (errcode == OCCTX_EC_IP4_NOT)
+ break;
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ if (errcode == OCCTX_EC_L4_CSUM)
+ val |= PKT_RX_L4_CKSUM_BAD;
+ else
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ }
+
+ ol_flags[idx] = val;
+ }
+}
+
+void *
+octeontx_fastpath_lookup_mem_get(void)
+{
+ const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
+ const struct rte_memzone *mz;
+ void *mem;
+
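+ /* The table lives in a named memzone, so every port (and secondary
+ * process) that looks it up by name resolves the same shared copy.
+ */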
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ /* First request: reserve and populate the memzone */
+ mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
+ SOCKET_ID_ANY, 0, OCCTX_ALIGN);
+ if (mz != NULL) {
+ mem = mz->addr;
+ /* Form the rx ol_flags based on errcode */
+ octeontx_create_rx_ol_flags_array(mem);
+ return mem;
+ }
+ return NULL;
+}