#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
+#define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops) \
+ (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] \
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] \
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] \
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] \
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])
+
+#define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops) \
+ (enq_op = \
+ enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] \
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] \
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)] \
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)] \
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
+ [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
+
static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
#undef R
};
- const event_dequeue_t sso_hws_tmo_deq[2][2][2][2][2][2] = {
+ const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const event_dequeue_burst_t sso_hws_tmo_deq_burst[2][2][2][2][2][2] = {
+ const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
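+
+	/* Dequeue variants that additionally handle crypto adapter
+	 * completions; wired up below when dev->is_ca_internal_port is set.
+	 */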
+ const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
- const event_dequeue_t sso_hws_tmo_deq_seg[2][2][2][2][2][2] = {
+ const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
const event_dequeue_burst_t
- sso_hws_tmo_deq_seg_burst[2][2][2][2][2][2] = {
+ sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
+ const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ /* Tx modes */
+ const event_tx_adapter_enqueue
+ sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
+ NIX_TX_FASTPATH_MODES
+#undef T
+ };
+
+ const event_tx_adapter_enqueue
+ sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
+ NIX_TX_FASTPATH_MODES
+#undef T
+ };
+
event_dev->enqueue = cn10k_sso_hws_enq;
event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
- event_dev->dequeue = sso_hws_deq_seg
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
- event_dev->dequeue_burst = sso_hws_deq_seg_burst
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_seg);
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_seg_burst);
if (dev->is_timeout_deq) {
- event_dev->dequeue = sso_hws_tmo_deq_seg
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
- event_dev->dequeue_burst = sso_hws_tmo_deq_seg_burst
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_tmo_seg);
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_tmo_seg_burst);
+ }
+ if (dev->is_ca_internal_port) {
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_ca_seg);
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_ca_seg_burst);
}
} else {
- event_dev->dequeue = sso_hws_deq
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
- event_dev->dequeue_burst = sso_hws_deq_burst
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_burst);
if (dev->is_timeout_deq) {
- event_dev->dequeue = sso_hws_tmo_deq
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
- event_dev->dequeue_burst = sso_hws_tmo_deq_burst
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_VLAN_STRIP_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_MARK_UPDATE_F)]
- [!!(dev->rx_offloads &
- NIX_RX_OFFLOAD_CHECKSUM_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
- [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_tmo);
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_tmo_burst);
+ }
+ if (dev->is_ca_internal_port) {
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_ca);
+ CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_ca_burst);
}
}
+ event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;
+
+ if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
+ CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
+ sso_hws_tx_adptr_enq_seg);
+ else
+ CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
+ sso_hws_tx_adptr_enq);
+
+ event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}
static void
else
*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
- RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;
return 0;
}
return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}
+static int
+cn10k_sso_rx_adapter_vector_limits(
+ const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
+ struct rte_event_eth_rx_adapter_vector_limits *limits)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev;
+ int ret;
+
+ RTE_SET_USED(dev);
+ ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
+ if (ret)
+ return -ENOTSUP;
+
+ cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
+ limits->log2_sz = true;
+ limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
+ limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
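+	/* The VWQE coalescing interval is programmed in ~100ns steps
+	 * (register value plus one); BITMASK_ULL(8, 0) suggests a 9-bit
+	 * wait-timeout field, capping the range at 511 such steps.
+	 */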
+ limits->min_timeout_ns =
+ (roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
+ limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;
+
+ return 0;
+}
+
+static int
+cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
+ uint16_t port_id, uint16_t rq_id, uint16_t sz,
+ uint64_t tmo_ns, struct rte_mempool *vmp)
+{
+ struct roc_nix_rq *rq;
+
+ rq = &cnxk_eth_dev->rqs[rq_id];
+
+ if (!rq->sso_ena)
+ return -EINVAL;
+ if (rq->flow_tag_width == 0)
+ return -EINVAL;
+
+ rq->vwqe_ena = 1;
+ rq->vwqe_first_skip = 0;
+ rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+ rq->vwqe_max_sz_exp = rte_log2_u32(sz);
+ rq->vwqe_wait_tmo =
+ tmo_ns /
+ ((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
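+	/* Pack the ethdev port id into tag bits 20-27 and the
+	 * RTE_EVENT_TYPE_ETHDEV_VECTOR event type into bits 28-31 so vector
+	 * events identify their source port.
+	 */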
+ rq->tag_mask = (port_id & 0xF) << 20;
+ rq->tag_mask |=
+ (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
+ << 24;
+
+ return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+static int
+cn10k_sso_rx_adapter_vector_config(
+ const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_event_vector_config *config)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev;
+ struct cnxk_sso_evdev *dev;
+ int i, rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
+ if (rc)
+ return -EINVAL;
+
+ dev = cnxk_sso_pmd_priv(event_dev);
+ cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
+ if (rx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
+ RTE_EVENT_TYPE_ETHDEV_VECTOR);
+ rc = cnxk_sso_xae_reconfigure(
+ (struct rte_eventdev *)(uintptr_t)event_dev);
+ rc = cnxk_sso_rx_adapter_vwqe_enable(
+ cnxk_eth_dev, eth_dev->data->port_id, i,
+ config->vector_sz, config->vector_timeout_ns,
+ config->vector_mp);
+ if (rc)
+ return -EINVAL;
+ }
+ } else {
+ cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
+ RTE_EVENT_TYPE_ETHDEV_VECTOR);
+ rc = cnxk_sso_xae_reconfigure(
+ (struct rte_eventdev *)(uintptr_t)event_dev);
+ rc = cnxk_sso_rx_adapter_vwqe_enable(
+ cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
+ config->vector_sz, config->vector_timeout_ns,
+ config->vector_mp);
+ if (rc)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int
cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev, uint32_t *caps)
if (ret)
*caps = 0;
else
- *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
+ RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
return 0;
}
return cn10k_sso_updt_tx_adptr_data(event_dev);
}
+static int
+cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_cryptodev *cdev, uint32_t *caps)
+{
+ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
+
+ return 0;
+}
+
+static int
+cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id,
+ const struct rte_event *event)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+ RTE_SET_USED(event);
+
+ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
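+	/* Mark the crypto adapter as an internal port and re-resolve the
+	 * fastpath handlers so the CA-aware dequeue variants take over.
+	 */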
+ dev->is_ca_internal_port = 1;
+ cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+ return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+}
+
+static int
+cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id)
+{
+ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
+ return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
+}
+
static struct rte_eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+ .eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
+ .eth_rx_adapter_event_vector_config =
+ cn10k_sso_rx_adapter_vector_config,
+
.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,
.timer_adapter_caps_get = cnxk_tim_caps_get,
+ .crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
+ .crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
+ .crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
+
.dump = cnxk_sso_dump,
.dev_start = cn10k_sso_start,
.dev_stop = cn10k_sso_stop,
int rc;
if (RTE_CACHE_LINE_SIZE != 64) {
- plt_err("Driver not compiled for CN9K");
+ plt_err("Driver not compiled for CN10K");
return -EFAULT;
}