event/cnxk: allocate event in-flight buffers
[dpdk.git] drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		  struct rte_event_dev_info *dev_info)
{
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

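/*
 * Allocate the XAQ buffer pool that holds in-flight events, together with the
 * add-work flow-control memory used to backpressure enqueues. Any pool left
 * over from a previous configuration is released first, and the resulting NPA
 * aura is handed to the SSO hardware groups.
 */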
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
	char pool_name[RTE_MEMZONE_NAMESIZE];
	uint32_t xaq_cnt, npa_aura_id;
	const struct rte_memzone *mz;
	struct npa_aura_s *aura;
	static int reconfig_cnt;
	int rc;

	if (dev->xaq_pool) {
		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		rte_mempool_free(dev->xaq_pool);
		dev->xaq_pool = NULL;
	}

	/*
	 * Allocate memory for Add work backpressure.
	 */
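	/* The zone holds the flow-control counter in its first cache line and
	 * an NPA aura context right after it; looking the zone up before
	 * reserving lets an existing allocation be reused on reconfigure.
	 */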
	mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
						 sizeof(struct npa_aura_s) +
							 RTE_CACHE_LINE_SIZE,
						 0, 0, RTE_CACHE_LINE_SIZE);
	if (mz == NULL) {
		plt_err("Failed to allocate mem for fcmem");
		return -ENOMEM;
	}

	dev->fc_iova = mz->iova;
	dev->fc_mem = mz->addr;

	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
				     RTE_CACHE_LINE_SIZE);
	memset(aura, 0, sizeof(struct npa_aura_s));

	aura->fc_ena = 1;
	aura->fc_addr = dev->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */

	/* Taken from HRM 14.3.3(4) */
	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
	xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
		   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
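	/* Illustrative sizing (numbers are assumptions, not hardware values):
	 * if the SSO reports iue = 4096 in-unit entries and xae_waes = 16
	 * work entries per XAQ buffer, the middle term contributes
	 * 4096 / 16 = 256 buffers, on top of the per-queue cache and slack
	 * terms above.
	 */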

	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
	/* Set up the XAQ pool based on the number of queues. */
	snprintf(pool_name, sizeof(pool_name), "cnxk_xaq_buf_pool_%d",
		 reconfig_cnt);
	dev->xaq_pool = (void *)rte_mempool_create_empty(
		pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
		rte_socket_id(), 0);

	if (dev->xaq_pool == NULL) {
		plt_err("Unable to create empty mempool.");
		rte_memzone_free(mz);
		return -ENOMEM;
	}

	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
					rte_mbuf_platform_mempool_ops(), aura);
	if (rc != 0) {
		plt_err("Unable to set xaqpool ops.");
		goto alloc_fail;
	}

	rc = rte_mempool_populate_default(dev->xaq_pool);
	if (rc < 0) {
		plt_err("Unable to populate xaqpool.");
		goto alloc_fail;
	}
	reconfig_cnt++;
	/* When SW does addwork (enqueue), check whether there is space in the
	 * XAQ by comparing the count at fc_addr above against the xaq_lmt
	 * calculated below. Keep a minimum headroom of (CNXK_SSO_XAQ_SLACK / 2)
	 * buffers per queue so that the SSO can fetch XAQs into its cache even
	 * before enqueue is called.
	 */
	dev->xaq_lmt =
		xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
	dev->nb_xaq_cfg = xaq_cnt;

	npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
	return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
				       dev->nb_event_queues);
alloc_fail:
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(mz);
	return rc;
}

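/*
 * Validate the requested device configuration against the advertised limits:
 * dequeue timeout range, queue/port counts and the fixed per-port depths.
 * On success, any previously allocated XAQ resources are released and the
 * accepted queue/port counts are recorded for the subsequent setup calls.
 */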
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;
	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		plt_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (!conf->nb_event_queues || !conf->nb_event_ports ||
	    conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		plt_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		plt_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		plt_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	if (dev->xaq_pool) {
		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		rte_mempool_free(dev->xaq_pool);
		dev->xaq_pool = NULL;
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

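/*
 * Map an event queue onto an SSO hardware group priority. RTE priorities span
 * 0-255 while the hardware exposes 8 group priority levels, so the requested
 * value is divided by 32 (256 / 8) to select the level.
 */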
int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
	/* Normalize <0-255> to <0-7> */
	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
					  queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}

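/*
 * Default port configuration: the new-event threshold tracks
 * dev->max_num_events and the enqueue/dequeue depths are fixed at 1, as
 * advertised in cnxk_sso_info_get().
 */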
void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

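/*
 * Common probe-time initialization: reserve a named memzone holding the
 * private device pointer so it can be found again by name, initialize the
 * RoC SSO layer for the underlying PCI device, and seed the default dequeue
 * timeout bounds and queue/port counts.
 */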
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
	const struct rte_memzone *mz = NULL;
	struct rte_pci_device *pci_dev;
	struct cnxk_sso_evdev *dev;
	int rc;

	mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
				 SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		plt_err("Failed to create eventdev memzone");
		return -ENOMEM;
	}

	dev = cnxk_sso_pmd_priv(event_dev);
	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
	dev->sso.pci_dev = pci_dev;

	*(uint64_t *)mz->addr = (uint64_t)dev;

	/* Initialize the base cnxk_dev object */
	rc = roc_sso_dev_init(&dev->sso);
	if (rc < 0) {
		plt_err("Failed to initialize RoC SSO rc=%d", rc);
		goto error;
	}

	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	return 0;

error:
	rte_memzone_free(mz);
	return rc;
}

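/*
 * Teardown on device close/removal: only the primary process releases the
 * RoC SSO resources; secondary processes have nothing to clean up.
 */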
int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	roc_sso_rsrc_fini(&dev->sso);
	roc_sso_dev_fini(&dev->sso);

	return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}