62b9695a2575ad3b36c434af6278851e46ab0df2
[dpdk.git] / drivers / event / dlb / dlb.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <assert.h>
6 #include <errno.h>
7 #include <nmmintrin.h>
8 #include <pthread.h>
9 #include <stdbool.h>
10 #include <stdint.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/fcntl.h>
14 #include <sys/mman.h>
15 #include <unistd.h>
16
17 #include <rte_common.h>
18 #include <rte_config.h>
19 #include <rte_cycles.h>
20 #include <rte_debug.h>
21 #include <rte_dev.h>
22 #include <rte_errno.h>
23 #include <rte_io.h>
24 #include <rte_kvargs.h>
25 #include <rte_log.h>
26 #include <rte_malloc.h>
27 #include <rte_mbuf.h>
28 #include <rte_prefetch.h>
29 #include <rte_ring.h>
30 #include <rte_string_fns.h>
31
32 #include <rte_eventdev.h>
33 #include <rte_eventdev_pmd.h>
34
35 #include "dlb_priv.h"
36 #include "dlb_iface.h"
37 #include "dlb_inline_fns.h"
38
/*
 * Resources exposed to eventdev. These are compile-time defaults; probe
 * overwrites max_event_queues, max_event_ports, and max_num_events with
 * the counts actually queried from hardware (see dlb_hw_query_resources()).
 */
#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
#endif
static struct rte_event_dev_info evdev_dlb_default_info = {
	.driver_name = "", /* probe will set */
	.min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
	.max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
	/* Advertise no more queues than the eventdev layer can address. */
#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
#else
	.max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
#endif
	.max_event_queue_flows = DLB_MAX_NUM_FLOWS,
	.max_event_queue_priority_levels = DLB_QID_PRIORITIES,
	.max_event_priority_levels = DLB_QID_PRIORITIES,
	.max_event_ports = DLB_MAX_NUM_LDB_PORTS,
	.max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
	.max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
	.max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
	.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
	.max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
	/* Capabilities are fixed at compile time. */
	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
			  RTE_EVENT_DEV_CAP_EVENT_QOS |
			  RTE_EVENT_DEV_CAP_BURST_MODE |
			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
};
70
/* Per-port, per-port-type state table, indexed [port id][LDB/DIR type].
 * NOTE(review): the "process_local" name suggests this holds state that is
 * valid only within the current process (e.g. mappings) — confirm against
 * the struct definition in dlb_priv.h.
 */
struct process_local_port_data
dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
73
/* Report the current depth of an eventdev queue.
 *
 * Placeholder so the "xstats" patch compiles; real depth tracking is
 * supplied by a later patch. Always reports an empty queue.
 */
uint32_t
dlb_get_queue_depth(struct dlb_eventdev *dlb,
		    struct dlb_eventdev_queue *queue)
{
	(void)dlb;
	(void)queue;

	return 0;
}
84
85 static int
86 dlb_hw_query_resources(struct dlb_eventdev *dlb)
87 {
88         struct dlb_hw_dev *handle = &dlb->qm_instance;
89         struct dlb_hw_resource_info *dlb_info = &handle->info;
90         int ret;
91
92         ret = dlb_iface_get_num_resources(handle,
93                                           &dlb->hw_rsrc_query_results);
94         if (ret) {
95                 DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
96                 return ret;
97         }
98
99         /* Complete filling in device resource info returned to evdev app,
100          * overriding any default values.
101          * The capabilities (CAPs) were set at compile time.
102          */
103
104         evdev_dlb_default_info.max_event_queues =
105                 dlb->hw_rsrc_query_results.num_ldb_queues;
106
107         evdev_dlb_default_info.max_event_ports =
108                 dlb->hw_rsrc_query_results.num_ldb_ports;
109
110         evdev_dlb_default_info.max_num_events =
111                 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
112
113         /* Save off values used when creating the scheduling domain. */
114
115         handle->info.num_sched_domains =
116                 dlb->hw_rsrc_query_results.num_sched_domains;
117
118         handle->info.hw_rsrc_max.nb_events_limit =
119                 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
120
121         handle->info.hw_rsrc_max.num_queues =
122                 dlb->hw_rsrc_query_results.num_ldb_queues +
123                 dlb->hw_rsrc_query_results.num_dir_ports;
124
125         handle->info.hw_rsrc_max.num_ldb_queues =
126                 dlb->hw_rsrc_query_results.num_ldb_queues;
127
128         handle->info.hw_rsrc_max.num_ldb_ports =
129                 dlb->hw_rsrc_query_results.num_ldb_ports;
130
131         handle->info.hw_rsrc_max.num_dir_ports =
132                 dlb->hw_rsrc_query_results.num_dir_ports;
133
134         handle->info.hw_rsrc_max.reorder_window_size =
135                 dlb->hw_rsrc_query_results.num_hist_list_entries;
136
137         rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
138
139         return 0;
140 }
141
/* Wrapper for string to int conversion. Substituted for atoi(...), which is
 * unsafe.
 */
#define DLB_BASE_10 10

/* Parse a base-10 integer from str into *result.
 *
 * Returns 0 on success; -EINVAL on NULL arguments, an empty/non-numeric
 * string, trailing garbage, or a value outside int range; -errno (e.g.
 * -ERANGE) if strtol() itself fails.
 */
static int
dlb_string_to_int(int *result, const char *str)
{
	long ret;
	char *endstr;

	if (str == NULL || result == NULL)
		return -EINVAL;

	errno = 0;
	ret = strtol(str, &endstr, DLB_BASE_10);
	if (errno)
		return -errno;

	/* long int and int may be different width for some architectures.
	 * Also reject strings with no digits (endstr == str) and strings
	 * with trailing non-numeric characters (e.g. "12abc"), which
	 * strtol() otherwise accepts silently.
	 */
	if (ret < INT_MIN || ret > INT_MAX || endstr == str ||
	    *endstr != '\0')
		return -EINVAL;

	*result = ret;
	return 0;
}
168
169 static int
170 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
171 {
172         int *socket_id = opaque;
173         int ret;
174
175         ret = dlb_string_to_int(socket_id, value);
176         if (ret < 0)
177                 return ret;
178
179         if (*socket_id > RTE_MAX_NUMA_NODES)
180                 return -EINVAL;
181
182         return 0;
183 }
184
185 static int
186 set_max_num_events(const char *key __rte_unused,
187                    const char *value,
188                    void *opaque)
189 {
190         int *max_num_events = opaque;
191         int ret;
192
193         if (value == NULL || opaque == NULL) {
194                 DLB_LOG_ERR("NULL pointer\n");
195                 return -EINVAL;
196         }
197
198         ret = dlb_string_to_int(max_num_events, value);
199         if (ret < 0)
200                 return ret;
201
202         if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
203                 DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
204                             DLB_MAX_NUM_LDB_CREDITS);
205                 return -EINVAL;
206         }
207
208         return 0;
209 }
210
211 static int
212 set_num_dir_credits(const char *key __rte_unused,
213                     const char *value,
214                     void *opaque)
215 {
216         int *num_dir_credits = opaque;
217         int ret;
218
219         if (value == NULL || opaque == NULL) {
220                 DLB_LOG_ERR("NULL pointer\n");
221                 return -EINVAL;
222         }
223
224         ret = dlb_string_to_int(num_dir_credits, value);
225         if (ret < 0)
226                 return ret;
227
228         if (*num_dir_credits < 0 ||
229             *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
230                 DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
231                             DLB_MAX_NUM_DIR_CREDITS);
232                 return -EINVAL;
233         }
234
235         return 0;
236 }
237
238 static int
239 set_dev_id(const char *key __rte_unused,
240            const char *value,
241            void *opaque)
242 {
243         int *dev_id = opaque;
244         int ret;
245
246         if (value == NULL || opaque == NULL) {
247                 DLB_LOG_ERR("NULL pointer\n");
248                 return -EINVAL;
249         }
250
251         ret = dlb_string_to_int(dev_id, value);
252         if (ret < 0)
253                 return ret;
254
255         return 0;
256 }
257
258 static int
259 set_defer_sched(const char *key __rte_unused,
260                 const char *value,
261                 void *opaque)
262 {
263         int *defer_sched = opaque;
264
265         if (value == NULL || opaque == NULL) {
266                 DLB_LOG_ERR("NULL pointer\n");
267                 return -EINVAL;
268         }
269
270         if (strncmp(value, "on", 2) != 0) {
271                 DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
272                             value);
273                 return -EINVAL;
274         }
275
276         *defer_sched = 1;
277
278         return 0;
279 }
280
281 static int
282 set_num_atm_inflights(const char *key __rte_unused,
283                       const char *value,
284                       void *opaque)
285 {
286         int *num_atm_inflights = opaque;
287         int ret;
288
289         if (value == NULL || opaque == NULL) {
290                 DLB_LOG_ERR("NULL pointer\n");
291                 return -EINVAL;
292         }
293
294         ret = dlb_string_to_int(num_atm_inflights, value);
295         if (ret < 0)
296                 return ret;
297
298         if (*num_atm_inflights < 0 ||
299             *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
300                 DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
301                             DLB_MAX_NUM_ATM_INFLIGHTS);
302                 return -EINVAL;
303         }
304
305         return 0;
306 }
307
308 void
309 dlb_entry_points_init(struct rte_eventdev *dev)
310 {
311         static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
312                 .dump             = dlb_eventdev_dump,
313                 .xstats_get       = dlb_eventdev_xstats_get,
314                 .xstats_get_names = dlb_eventdev_xstats_get_names,
315                 .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
316                 .xstats_reset       = dlb_eventdev_xstats_reset,
317         };
318
319         /* Expose PMD's eventdev interface */
320         dev->dev_ops = &dlb_eventdev_entry_ops;
321 }
322
323 int
324 dlb_primary_eventdev_probe(struct rte_eventdev *dev,
325                            const char *name,
326                            struct dlb_devargs *dlb_args)
327 {
328         struct dlb_eventdev *dlb;
329         int err;
330
331         dlb = dev->data->dev_private;
332
333         dlb->event_dev = dev; /* backlink */
334
335         evdev_dlb_default_info.driver_name = name;
336
337         dlb->max_num_events_override = dlb_args->max_num_events;
338         dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
339         dlb->defer_sched = dlb_args->defer_sched;
340         dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
341
342         /* Open the interface.
343          * For vdev mode, this means open the dlb kernel module.
344          */
345         err = dlb_iface_open(&dlb->qm_instance, name);
346         if (err < 0) {
347                 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
348                             err);
349                 return err;
350         }
351
352         err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
353         if (err < 0) {
354                 DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
355                             err);
356                 return err;
357         }
358
359         err = dlb_hw_query_resources(dlb);
360         if (err) {
361                 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
362                 return err;
363         }
364
365         err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
366         if (err < 0) {
367                 DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
368                 return err;
369         }
370
371         /* Complete xtstats runtime initialization */
372         err = dlb_xstats_init(dlb);
373         if (err) {
374                 DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
375                 return err;
376         }
377
378         rte_spinlock_init(&dlb->qm_instance.resource_lock);
379
380         dlb_iface_low_level_io_init(dlb);
381
382         dlb_entry_points_init(dev);
383
384         return 0;
385 }
386
387 int
388 dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
389                              const char *name)
390 {
391         struct dlb_eventdev *dlb;
392         int err;
393
394         dlb = dev->data->dev_private;
395
396         evdev_dlb_default_info.driver_name = name;
397
398         err = dlb_iface_open(&dlb->qm_instance, name);
399         if (err < 0) {
400                 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
401                             err);
402                 return err;
403         }
404
405         err = dlb_hw_query_resources(dlb);
406         if (err) {
407                 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
408                 return err;
409         }
410
411         dlb_iface_low_level_io_init(dlb);
412
413         dlb_entry_points_init(dev);
414
415         return 0;
416 }
417
418 int
419 dlb_parse_params(const char *params,
420                  const char *name,
421                  struct dlb_devargs *dlb_args)
422 {
423         int ret = 0;
424         static const char * const args[] = { NUMA_NODE_ARG,
425                                              DLB_MAX_NUM_EVENTS,
426                                              DLB_NUM_DIR_CREDITS,
427                                              DEV_ID_ARG,
428                                              DLB_DEFER_SCHED_ARG,
429                                              DLB_NUM_ATM_INFLIGHTS_ARG,
430                                              NULL };
431
432         if (params && params[0] != '\0') {
433                 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
434
435                 if (kvlist == NULL) {
436                         DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
437                                      name);
438                 } else {
439                         int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
440                                                      set_numa_node,
441                                                      &dlb_args->socket_id);
442                         if (ret != 0) {
443                                 DLB_LOG_ERR("%s: Error parsing numa node parameter",
444                                             name);
445                                 rte_kvargs_free(kvlist);
446                                 return ret;
447                         }
448
449                         ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
450                                                  set_max_num_events,
451                                                  &dlb_args->max_num_events);
452                         if (ret != 0) {
453                                 DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
454                                             name);
455                                 rte_kvargs_free(kvlist);
456                                 return ret;
457                         }
458
459                         ret = rte_kvargs_process(kvlist,
460                                         DLB_NUM_DIR_CREDITS,
461                                         set_num_dir_credits,
462                                         &dlb_args->num_dir_credits_override);
463                         if (ret != 0) {
464                                 DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
465                                             name);
466                                 rte_kvargs_free(kvlist);
467                                 return ret;
468                         }
469
470                         ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
471                                                  set_dev_id,
472                                                  &dlb_args->dev_id);
473                         if (ret != 0) {
474                                 DLB_LOG_ERR("%s: Error parsing dev_id parameter",
475                                             name);
476                                 rte_kvargs_free(kvlist);
477                                 return ret;
478                         }
479
480                         ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
481                                                  set_defer_sched,
482                                                  &dlb_args->defer_sched);
483                         if (ret != 0) {
484                                 DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
485                                             name);
486                                 rte_kvargs_free(kvlist);
487                                 return ret;
488                         }
489
490                         ret = rte_kvargs_process(kvlist,
491                                                  DLB_NUM_ATM_INFLIGHTS_ARG,
492                                                  set_num_atm_inflights,
493                                                  &dlb_args->num_atm_inflights);
494                         if (ret != 0) {
495                                 DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
496                                             name);
497                                 rte_kvargs_free(kvlist);
498                                 return ret;
499                         }
500
501                         rte_kvargs_free(kvlist);
502                 }
503         }
504         return ret;
505 }
506 RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);