Add support for event eth Rx adapter.
Resize the cn10k workslot fast path structure to fit within a 64B cache line.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
time granularity of 2.5us on CN9K and 1us on CN10K.
- Up to 256 TIM rings a.k.a event timer adapters.
- Up to 8 rings traversed in parallel.
+- HW managed packets enqueued from ethdev to eventdev, exposed through the
+  event eth Rx adapter (see the usage sketch below).
+- N:1 ethernet device Rx queue to Event queue mapping.
+- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+ capability while maintaining receive packet order.
+- Full Rx/Tx offload support defined through ethdev queue configuration.
+- HW managed event vectorization on CN10K for packets enqueued from ethdev to
+  eventdev, configurable per Rx queue in the Rx adapter.
+- Event vector transmission via Tx adapter.
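+
+The sketch below shows one way to connect an ethernet device to the event
+device through the Rx adapter API. The adapter id, event queue and scheduling
+type used here are illustrative and assume both devices are already
+configured::
+
+    #include <string.h>
+
+    #include <rte_event_eth_rx_adapter.h>
+    #include <rte_eventdev.h>
+
+    static int
+    attach_rx_queues(uint8_t evdev_id, uint16_t eth_port_id)
+    {
+        struct rte_event_eth_rx_adapter_queue_conf qconf;
+        struct rte_event_port_conf pconf;
+        int rc;
+
+        rc = rte_event_port_default_conf_get(evdev_id, 0, &pconf);
+        if (rc)
+            return rc;
+
+        /* Adapter id 0; event/cnxk advertises the INTERNAL_PORT capability
+         * when paired with net/cnxk, so no service core is needed to move
+         * packets into the event device.
+         */
+        rc = rte_event_eth_rx_adapter_create(0, evdev_id, &pconf);
+        if (rc)
+            return rc;
+
+        memset(&qconf, 0, sizeof(qconf));
+        qconf.ev.queue_id = 0;
+        qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+
+        /* An rx_queue_id of -1 maps all Rx queues of the port. */
+        rc = rte_event_eth_rx_adapter_queue_add(0, eth_port_id, -1, &qconf);
+        if (rc)
+            return rc;
+
+        return rte_event_eth_rx_adapter_start(0);
+    }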
Prerequisites and Compilation procedure
---------------------------------------
-a 0002:0e:00.0,qos=[1-50-50-50]
+- ``Force Rx Back pressure``
+
+  Force Rx back pressure when the same mempool is used across multiple
+  ethernet devices connected to the event device.
+
+ For example::
+
+ -a 0002:0e:00.0,force_rx_bp=1
+
- ``TIM disable NPA``
By default chunks are allocated from NPA then TIM can automatically free
+---+------------+-------------------------------------------------------+
| 2 | TIM | --log-level='pmd\.event\.cnxk\.timer,8' |
+---+------------+-------------------------------------------------------+
+
+Limitations
+-----------
+
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+Using the same mempool for all the ethernet device ports connected to the
+event device would cause back pressure to be asserted only on the first
+ethernet device, so back pressure is automatically disabled in this case.
+Applications can override this behaviour with the ``force_rx_bp=1`` device
+argument.
+Using a unique mempool per ethernet device is recommended when multiple
+ethernet devices are connected to the event device.
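+
+A dedicated mempool per ethernet device port can be created as sketched
+below; the pool name, element count and cache size are illustrative::
+
+    #include <stdio.h>
+
+    #include <rte_ethdev.h>
+    #include <rte_mbuf.h>
+
+    /* One mbuf pool per port so NPA back pressure maps to a single ethdev. */
+    static struct rte_mempool *
+    create_port_pool(uint16_t port_id)
+    {
+        char name[RTE_MEMPOOL_NAMESIZE];
+
+        snprintf(name, sizeof(name), "rx_pool_p%u", port_id);
+        return rte_pktmbuf_pool_create(name, 8192, 256, 0,
+                                       RTE_MBUF_DEFAULT_BUF_SIZE,
+                                       rte_eth_dev_socket_id(port_id));
+    }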
usecases. Configuration happens via standard rawdev enq/deq operations. See
the :doc:`../rawdevs/cnxk_bphy` rawdev guide for more details on this driver.
+* **Added support for Marvell CN10K and CN9K event Rx adapter.**
+
+  * Added Rx adapter support for event/cnxk when the connected ethernet
+    device is net/cnxk.
+
* **Added cppc_cpufreq support to Power Management library.**
Added support for cppc_cpufreq driver which works on most arm64 platforms.
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
+void __roc_api roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
+ uint8_t ena, uint8_t force);
+
/* NPC */
int __roc_api roc_nix_npc_promisc_ena_dis(struct roc_nix *roc_nix, int enable);
exit:
return rc;
}
+
+void
+roc_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
+ uint8_t force)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aq_enq_req *req;
+ struct npa_aq_enq_rsp *rsp;
+ struct mbox *mbox;
+ uint32_t limit;
+ int rc;
+
+ if (roc_nix_is_sdp(roc_nix))
+ return;
+
+ if (!lf)
+ return;
+ mbox = lf->mbox;
+
+ req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (req == NULL)
+ return;
+
+ req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return;
+
+ limit = rsp->aura.limit;
+ /* BP is already enabled. */
+ if (rsp->aura.bp_ena) {
+		/* If the BP ids don't match and force is not set, disable BP. */
+ if ((rsp->aura.nix0_bpid != nix->bpid[0]) && !force) {
+ req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (req == NULL)
+ return;
+
+ req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_WRITE;
+
+ req->aura.bp_ena = 0;
+ req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+ mbox_process(mbox);
+ }
+ return;
+ }
+
+	/* BP was previously enabled but has since been disabled; skip. */
+ if (rsp->aura.bp)
+ return;
+
+ req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (req == NULL)
+ return;
+
+ req->aura_id = roc_npa_aura_handle_to_aura(pool_id);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_WRITE;
+
+ if (ena) {
+ req->aura.nix0_bpid = nix->bpid[0];
+ req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
+ req->aura.bp = NIX_RQ_AURA_THRESH(
+			limit > 128 ? 256 : limit); /* 95% of size */
+ req->aura_mask.bp = ~(req->aura_mask.bp);
+ }
+
+ req->aura.bp_ena = !!ena;
+ req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+ mbox_process(mbox);
+}
#define NIX_SQB_LOWER_THRESH ((uint16_t)70)
/* Apply BP/DROP when CQ is 95% full */
-#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+#define NIX_RQ_AURA_THRESH(x) (((x) * 95) / 100)
/* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
#define CQ_CQE_THRESH_DEFAULT 0x1ULL
roc_nix_fc_config_set;
roc_nix_fc_mode_set;
roc_nix_fc_mode_get;
+	roc_nix_fc_npa_bp_cfg;
roc_nix_get_base_chan;
roc_nix_get_pf;
roc_nix_get_pf_func;
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
-static void
-cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)
-{
- ws->tag_wqe_op = base + SSOW_LF_GWS_WQE0;
- ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
- ws->updt_wqe_op = base + SSOW_LF_GWS_OP_UPD_WQP_GRP1;
- ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
- ws->swtag_untag_op = base + SSOW_LF_GWS_OP_SWTAG_UNTAG;
- ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
- ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
-}
-
static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
/* First cache line is reserved for cookie */
ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
- cn10k_init_hws_ops(ws, ws->base);
ws->hws_id = port_id;
ws->swtag_req = 0;
ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
cq_ds_cnt &= 0x3FFF3FFF0000;
while (aq_cnt || cq_ds_cnt || ds_cnt) {
- plt_write64(req, ws->getwrk_op);
+ plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
cn10k_sso_hws_get_work_empty(ws, &ev);
if (fn != NULL && ev.u64 != 0)
fn(arg, ev);
if (ev.sched_type != SSO_TT_EMPTY)
- cnxk_sso_hws_swtag_flush(ws->tag_wqe_op,
- ws->swtag_flush_op);
+ cnxk_sso_hws_swtag_flush(
+ ws->base + SSOW_LF_GWS_WQE0,
+ ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
do {
val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
} while (val & BIT_ULL(56));
if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
SSO_TT_EMPTY) {
- plt_write64(BIT_ULL(16) | 1, ws->getwrk_op);
+ plt_write64(BIT_ULL(16) | 1,
+ ws->base + SSOW_LF_GWS_OP_GET_WORK0);
do {
- roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
+ roc_load_pair(gw.u64[0], gw.u64[1],
+ ws->base + SSOW_LF_GWS_WQE0);
} while (gw.u64[0] & BIT_ULL(63));
pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
return cnxk_sso_selftest(RTE_STR(event_cn10k));
}
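+/* Advertise the internal-port (HW managed) Rx adapter capabilities only when
+ * the paired ethdev is driven by net_cn10k; any other ethdev falls back to
+ * the SW Rx adapter capabilities.
+ */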
+static int
+cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+ if (rc)
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+
+ return 0;
+}
+
+static void
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
+ void *tstmp_info)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ int i;
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ struct cn10k_sso_hws *ws = event_dev->data->ports[i];
+ ws->lookup_mem = lookup_mem;
+ ws->tstamp = tstmp_info;
+ }
+}
+
+static int
+cn10k_sso_rx_adapter_queue_add(
+ const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct cn10k_eth_rxq *rxq;
+ void *lookup_mem;
+ void *tstmp_info;
+ int rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
+ if (rc)
+ return -EINVAL;
+
+ rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
+ queue_conf);
+ if (rc)
+ return -EINVAL;
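+	/* lookup_mem and timestamp info are per ethernet device; Rx queue 0 is
+	 * used only as a handle to fetch them for the event ports.
+	 */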
+ rxq = eth_dev->data->rx_queues[0];
+ lookup_mem = rxq->lookup_mem;
+ tstmp_info = rxq->tstamp;
+ cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+ cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+ return 0;
+}
+
+static int
+cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
+ if (rc)
+ return -EINVAL;
+
+ return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
static struct rte_eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
.port_unlink = cn10k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
+ .eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
+ .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
+ .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+
.timer_adapter_caps_get = cnxk_tim_caps_get,
.dump = cnxk_sso_dump,
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
CNXK_SSO_GGRP_QOS "=<string>"
+ CNXK_SSO_FORCE_BP "=1"
CN10K_SSO_GW_MODE "=<int>"
CNXK_TIM_DISABLE_NPA "=1"
CNXK_TIM_CHNK_SLOTS "=<int>"
cn10k_sso_hws_forward_event(ws, ev);
break;
case RTE_EVENT_OP_RELEASE:
- cnxk_sso_hws_swtag_flush(ws->tag_wqe_op, ws->swtag_flush_op);
+ cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_WQE0,
+ ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
break;
default:
return 0;
if (ws->swtag_req) {
ws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);
+ cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);
return 1;
}
if (ws->swtag_req) {
ws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);
+ cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);
return ret;
}
#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__
+#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
+#include "cn10k_ethdev.h"
+#include "cn10k_rx.h"
+
/* SSO Operations */
static __rte_always_inline uint8_t
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
- const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(ws->tag_wqe_op));
+ const uint8_t cur_tt =
+ CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));
/* CNXK model
* cur_tt/new_tt SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
if (new_tt == SSO_TT_UNTAGGED) {
if (cur_tt != SSO_TT_UNTAGGED)
- cnxk_sso_hws_swtag_untag(ws->swtag_untag_op);
+ cnxk_sso_hws_swtag_untag(ws->base +
+ SSOW_LF_GWS_OP_SWTAG_UNTAG);
} else {
- cnxk_sso_hws_swtag_norm(tag, new_tt, ws->swtag_norm_op);
+ cnxk_sso_hws_swtag_norm(tag, new_tt,
+ ws->base + SSOW_LF_GWS_OP_SWTAG_NORM);
}
ws->swtag_req = 1;
}
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
- plt_write64(ev->u64, ws->updt_wqe_op);
- cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
+ plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+ cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
+ ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}
static __rte_always_inline void
const uint8_t grp = ev->queue_id;
/* Group hasn't changed, Use SWTAG to forward the event */
- if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_wqe_op)) == grp)
+ if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)
cn10k_sso_hws_fwd_swtag(ws, ev);
else
/*
PLT_CPU_FEATURE_PREAMBLE
"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
: [wdata] "+r"(gw.get_work)
- : [gw_loc] "r"(ws->getwrk_op)
+ : [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
: "memory");
#else
- plt_write64(gw.u64[0], ws->getwrk_op);
+ plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
do {
- roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
+ roc_load_pair(gw.u64[0], gw.u64[1],
+ ws->base + SSOW_LF_GWS_WQE0);
} while (gw.u64[0] & BIT_ULL(63));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
- : [tag_loc] "r"(ws->tag_wqe_op)
+ : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
: "memory");
#else
do {
- roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
+ roc_load_pair(gw.u64[0], gw.u64[1],
+ ws->base + SSOW_LF_GWS_WQE0);
} while (gw.u64[0] & BIT_ULL(63));
#endif
return cnxk_sso_selftest(RTE_STR(event_cn9k));
}
+static int
+cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
+ if (rc)
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+
+ return 0;
+}
+
+static void
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
+ void *tstmp_info)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ int i;
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ if (dev->dual_ws) {
+ struct cn9k_sso_hws_dual *dws =
+ event_dev->data->ports[i];
+ dws->lookup_mem = lookup_mem;
+ dws->tstamp = tstmp_info;
+ } else {
+ struct cn9k_sso_hws *ws = event_dev->data->ports[i];
+ ws->lookup_mem = lookup_mem;
+ ws->tstamp = tstmp_info;
+ }
+ }
+}
+
+static int
+cn9k_sso_rx_adapter_queue_add(
+ const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct cn9k_eth_rxq *rxq;
+ void *lookup_mem;
+ void *tstmp_info;
+ int rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
+ if (rc)
+ return -EINVAL;
+
+ rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
+ queue_conf);
+ if (rc)
+ return -EINVAL;
+
+ rxq = eth_dev->data->rx_queues[0];
+ lookup_mem = rxq->lookup_mem;
+ tstmp_info = rxq->tstamp;
+ cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+ cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+ return 0;
+}
+
+static int
+cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
+ if (rc)
+ return -EINVAL;
+
+ return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
.dev_infos_get = cn9k_sso_info_get,
.dev_configure = cn9k_sso_dev_configure,
.port_unlink = cn9k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
+ .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
+ .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
+ .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+
.timer_adapter_caps_get = cnxk_tim_caps_get,
.dump = cnxk_sso_dump,
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
CNXK_SSO_GGRP_QOS "=<string>"
+ CNXK_SSO_FORCE_BP "=1"
CN9K_SSO_SINGLE_WS "=1"
CNXK_TIM_DISABLE_NPA "=1"
CNXK_TIM_CHNK_SLOTS "=<int>"
#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__
+#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
+#include "cn9k_ethdev.h"
+#include "cn9k_rx.h"
+
/* SSO Operations */
static __rte_always_inline uint8_t
&dev->xae_cnt);
rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
dev);
+ rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_value,
+ &dev->force_ena_bp);
rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_value,
&single_ws);
rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value,
#define __CNXK_EVENTDEV_H__
#include <rte_devargs.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
#define CNXK_SSO_XAE_CNT "xae_cnt"
#define CNXK_SSO_GGRP_QOS "qos"
+#define CNXK_SSO_FORCE_BP "force_rx_bp"
#define CN9K_SSO_SINGLE_WS "single_ws"
#define CN10K_SSO_GW_MODE "gw_mode"
uint64_t nb_xaq_cfg;
rte_iova_t fc_iova;
struct rte_mempool *xaq_pool;
+ uint64_t rx_offloads;
uint64_t adptr_xae_cnt;
+ uint16_t rx_adptr_pool_cnt;
+ uint64_t *rx_adptr_pools;
uint16_t tim_adptr_ring_cnt;
uint16_t *timer_adptr_rings;
uint64_t *timer_adptr_sz;
uint32_t xae_cnt;
uint8_t qos_queue_cnt;
struct cnxk_sso_qos *qos_parse_data;
+ uint8_t force_ena_bp;
/* CN9K */
uint8_t dual_ws;
/* CN10K */
uint8_t gw_mode;
} __rte_cache_aligned;
-/* CN10K HWS ops */
-#define CN10K_SSO_HWS_OPS \
- uintptr_t swtag_desched_op; \
- uintptr_t swtag_flush_op; \
- uintptr_t swtag_untag_op; \
- uintptr_t swtag_norm_op; \
- uintptr_t updt_wqe_op; \
- uintptr_t tag_wqe_op; \
- uintptr_t getwrk_op
-
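+/* The get-work fast path fields below fit within a single 64B cache line;
+ * per-operation register addresses are derived from base instead of being
+ * cached individually.
+ */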
struct cn10k_sso_hws {
- /* Get Work Fastpath data */
- CN10K_SSO_HWS_OPS;
+ uint64_t base;
+ /* PTP timestamp */
+ struct cnxk_timesync_info *tstamp;
+ void *lookup_mem;
uint32_t gw_wdata;
uint8_t swtag_req;
uint8_t hws_id;
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
- uint64_t base;
uintptr_t lmt_base;
} __rte_cache_aligned;
struct cn9k_sso_hws {
/* Get Work Fastpath data */
CN9K_SSO_HWS_OPS;
+ /* PTP timestamp */
+ struct cnxk_timesync_info *tstamp;
+ void *lookup_mem;
uint8_t swtag_req;
uint8_t hws_id;
/* Add Work Fastpath data */
struct cn9k_sso_hws_dual {
/* Get Work Fastpath data */
struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
+ /* PTP timestamp */
+ struct cnxk_timesync_info *tstamp;
+ void *lookup_mem;
uint8_t swtag_req;
uint8_t vws; /* Ping pong bit */
uint8_t hws_id;
/* CN9K */
void cn9k_sso_set_rsrc(void *arg);
+/* Common adapter ops */
+int cnxk_sso_rx_adapter_queue_add(
+ const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id);
+int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev);
+int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev);
+
#endif /* __CNXK_EVENTDEV_H__ */
* Copyright(C) 2021 Marvell.
*/
+#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
void
int i;
switch (event_type) {
+ case RTE_EVENT_TYPE_ETHDEV: {
+ struct cnxk_eth_rxq_sp *rxq = data;
+ uint64_t *old_ptr;
+
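+		/* Track each unique Rx mempool; its size is added to
+		 * adptr_xae_cnt which is used to resize the XAQ pool on
+		 * Rx adapter queue add.
+		 */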
+ for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
+ if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
+ return;
+ }
+
+ dev->rx_adptr_pool_cnt++;
+ old_ptr = dev->rx_adptr_pools;
+ dev->rx_adptr_pools = rte_realloc(
+ dev->rx_adptr_pools,
+ sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
+ if (dev->rx_adptr_pools == NULL) {
+ dev->adptr_xae_cnt += rxq->qconf.mp->size;
+ dev->rx_adptr_pools = old_ptr;
+ dev->rx_adptr_pool_cnt--;
+ return;
+ }
+ dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
+ (uint64_t)rxq->qconf.mp;
+
+ dev->adptr_xae_cnt += rxq->qconf.mp->size;
+ break;
+ }
case RTE_EVENT_TYPE_TIMER: {
struct cnxk_tim_ring *timr = data;
uint16_t *old_ring_ptr;
break;
}
}
+
+static int
+cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
+ uint16_t port_id, const struct rte_event *ev,
+ uint8_t custom_flowid)
+{
+ struct roc_nix_rq *rq;
+
+ rq = &cnxk_eth_dev->rqs[rq_id];
+ rq->sso_ena = 1;
+ rq->tt = ev->sched_type;
+ rq->hwgrp = ev->queue_id;
+ rq->flow_tag_width = 20;
+ rq->wqe_skip = 1;
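+	/* Tag layout (without custom flow id): [31:28] = RTE_EVENT_TYPE_ETHDEV,
+	 * [27:20] = ethdev port id, [19:0] = NIX flow tag.
+	 */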
+ rq->tag_mask = (port_id & 0xF) << 20;
+ rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
+ << 24;
+
+ if (custom_flowid) {
+ rq->flow_tag_width = 0;
+ rq->tag_mask |= ev->flow_id;
+ }
+
+ return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+static int
+cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
+{
+ struct roc_nix_rq *rq;
+
+ rq = &cnxk_eth_dev->rqs[rq_id];
+ rq->sso_ena = 0;
+ rq->flow_tag_width = 32;
+ rq->tag_mask = 0;
+
+ return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+int
+cnxk_sso_rx_adapter_queue_add(
+ const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ uint16_t port = eth_dev->data->port_id;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ int i, rc = 0;
+
+ if (rx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ rc |= cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev,
+ i, queue_conf);
+ } else {
+ rxq_sp = cnxk_eth_rxq_to_sp(
+ eth_dev->data->rx_queues[rx_queue_id]);
+ cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
+ rc = cnxk_sso_xae_reconfigure(
+ (struct rte_eventdev *)(uintptr_t)event_dev);
+ rc |= cnxk_sso_rxq_enable(
+ cnxk_eth_dev, (uint16_t)rx_queue_id, port,
+ &queue_conf->ev,
+ !!(queue_conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
+		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, true,
+ dev->force_ena_bp);
+ }
+
+ if (rc < 0) {
+ plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
+ queue_conf->ev.queue_id);
+ return rc;
+ }
+
+ dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
+
+ return 0;
+}
+
+int
+cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ int i, rc = 0;
+
+ RTE_SET_USED(event_dev);
+ if (rx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
+ } else {
+ rxq_sp = cnxk_eth_rxq_to_sp(
+ eth_dev->data->rx_queues[rx_queue_id]);
+ rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
+		roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, false,
+ dev->force_ena_bp);
+ }
+
+ if (rc < 0)
+ plt_err("Failed to clear Rx adapter config port=%d, q=%d",
+ eth_dev->data->port_id, rx_queue_id);
+
+ return rc;
+}
+
+int
+cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(event_dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+int
+cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(event_dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
'cnxk_tim_worker.c',
)
-deps += ['bus_pci', 'common_cnxk']
+extra_flags = ['-flax-vector-conversions', '-Wno-strict-aliasing']
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk']