/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		  struct rte_event_dev_info *dev_info)
{
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
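
/* Allocate the XAQ buffer pool that backs the SSO's in-flight work entries
 * and set up the NPA aura used to flow-control (backpressure) add-work
 * operations against it.
 */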
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
	char pool_name[RTE_MEMZONE_NAMESIZE];
	uint32_t xaq_cnt, npa_aura_id;
	const struct rte_memzone *mz;
	struct npa_aura_s *aura;
	static int reconfig_cnt;
	int rc;

	if (dev->xaq_pool) {
		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		rte_mempool_free(dev->xaq_pool);
		dev->xaq_pool = NULL;
	}

	/*
	 * Allocate memory for add-work backpressure. Look the memzone up
	 * first so that a reconfigure reuses the zone reserved earlier.
	 */
	mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
						 sizeof(struct npa_aura_s) +
							 RTE_CACHE_LINE_SIZE,
						 0, 0, RTE_CACHE_LINE_SIZE);
	if (mz == NULL) {
		plt_err("Failed to allocate mem for fcmem");
		return -ENOMEM;
	}

	dev->fc_iova = mz->iova;
	dev->fc_mem = mz->addr;
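
	/* The first cache line of the memzone holds the flow-control count
	 * that the hardware updates; the aura context is placed just past it.
	 */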
	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
				     RTE_CACHE_LINE_SIZE);
	memset(aura, 0, sizeof(struct npa_aura_s));

	aura->fc_ena = 1;
	aura->fc_addr = dev->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */

	/* Taken from HRM 14.3.3(4) */
	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
	xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
		   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
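	/* i.e. CNXK_SSO_XAQ_CACHE_CNT buffers cached per queue, enough
	 * buffers to cover every in-unit entry (iue) at xae_waes work
	 * entries per buffer, plus CNXK_SSO_XAQ_SLACK spare buffers per
	 * queue.
	 */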

	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
	/* Set up the XAQ pool based on the number of queues. */
	snprintf(pool_name, sizeof(pool_name), "cnxk_xaq_buf_pool_%d",
		 reconfig_cnt);
	dev->xaq_pool = (void *)rte_mempool_create_empty(
		pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
		rte_socket_id(), 0);

	if (dev->xaq_pool == NULL) {
		plt_err("Unable to create empty mempool.");
		rte_memzone_free(mz);
		return -ENOMEM;
	}

	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
					rte_mbuf_platform_mempool_ops(), aura);
	if (rc != 0) {
		plt_err("Unable to set xaqpool ops.");
		goto alloc_fail;
	}

	rc = rte_mempool_populate_default(dev->xaq_pool);
	if (rc < 0) {
		plt_err("Unable to populate xaqpool.");
		goto alloc_fail;
	}
	reconfig_cnt++;
	/* When SW does addwork (enqueue), check whether there is space in the
	 * XAQ by comparing the count at fc_addr above against the xaq_lmt
	 * calculated below. Keep a minimum headroom of
	 * (CNXK_SSO_XAQ_SLACK / 2) buffers per queue so that the SSO can
	 * fetch XAQs into its cache even before enqueue is called.
	 */
	dev->xaq_lmt =
		xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
	dev->nb_xaq_cfg = xaq_cnt;

	npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
	return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
				       dev->nb_event_queues);
alloc_fail:
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(mz);
	return rc;
}
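
/* Validate the requested dequeue timeout, queue/port counts and depths
 * against the SSO limits reported above, then record the configuration.
 */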
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;
	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		plt_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (!conf->nb_event_queues || !conf->nb_event_ports ||
	    conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		plt_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		plt_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		plt_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	if (dev->xaq_pool) {
		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		rte_mempool_free(dev->xaq_pool);
		dev->xaq_pool = NULL;
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	return 0;
}
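
/* Advertise the most permissive queue defaults: maximum flow/order depth
 * and an ALL_TYPES queue at normal priority.
 */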
void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
	/* Normalize the eventdev priority range <0-255> to the hardware's
	 * eight levels <0-7>.
	 */
	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
					  queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}
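
/* Device init: reserve the shared memzone, attach the PCI device and bring
 * up the RoC SSO layer with the default timeout bounds.
 */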
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
	const struct rte_memzone *mz = NULL;
	struct rte_pci_device *pci_dev;
	struct cnxk_sso_evdev *dev;
	int rc;

	mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
				 SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		plt_err("Failed to create eventdev memzone");
		return -ENOMEM;
	}

	dev = cnxk_sso_pmd_priv(event_dev);
	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
	dev->sso.pci_dev = pci_dev;
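
	/* Stash the device pointer in the named memzone so that other cnxk
	 * components (e.g. the event timer PMD) can look it up by name.
	 */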
	*(uint64_t *)mz->addr = (uint64_t)dev;

	/* Initialize the base cnxk_dev object */
	rc = roc_sso_dev_init(&dev->sso);
	if (rc < 0) {
		plt_err("Failed to initialize RoC SSO rc=%d", rc);
		goto error;
	}

	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	return 0;

error:
	rte_memzone_free(mz);
	return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	roc_sso_rsrc_fini(&dev->sso);
	roc_sso_dev_fini(&dev->sso);

	return 0;
}
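
/* PCI remove callback: tears the eventdev down through cnxk_sso_fini. */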
int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}