1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
13 #include <sys/fcntl.h>
17 #include <rte_common.h>
18 #include <rte_config.h>
19 #include <rte_cycles.h>
20 #include <rte_debug.h>
22 #include <rte_errno.h>
24 #include <rte_kvargs.h>
26 #include <rte_malloc.h>
28 #include <rte_prefetch.h>
30 #include <rte_string_fns.h>
32 #include <rte_eventdev.h>
33 #include <rte_eventdev_pmd.h>
36 #include "dlb_iface.h"
37 #include "dlb_inline_fns.h"
40 * Resources exposed to eventdev.
42 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
43 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
/* Default device capabilities advertised to the eventdev layer. Several
 * fields (max_event_queues, max_event_ports, max_num_events) are later
 * overwritten by dlb_hw_query_resources() with the counts actually
 * available in hardware; .driver_name is filled in at probe time.
 * NOTE(review): this extract omits lines — the #else/#endif pair around
 * .max_event_queues and the closing "};" are not visible here.
 */
45 static struct rte_event_dev_info evdev_dlb_default_info = {
46 .driver_name = "", /* probe will set */
47 .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS,
48 .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS,
49 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES)
50 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
52 .max_event_queues = DLB_MAX_NUM_LDB_QUEUES,
54 .max_event_queue_flows = DLB_MAX_NUM_FLOWS,
55 .max_event_queue_priority_levels = DLB_QID_PRIORITIES,
56 .max_event_priority_levels = DLB_QID_PRIORITIES,
57 .max_event_ports = DLB_MAX_NUM_LDB_PORTS,
58 .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH,
59 .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH,
60 .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ,
61 .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
62 .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS,
63 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
64 RTE_EVENT_DEV_CAP_EVENT_QOS |
65 RTE_EVENT_DEV_CAP_BURST_MODE |
66 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
67 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
68 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
/* Process-local port data, indexed by hardware port id and by port type
 * (load-balanced vs directed — presumably what NUM_DLB_PORT_TYPES
 * enumerates; confirm against the header that defines it).
 */
71 struct process_local_port_data
72 dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
/* Placeholder queue-depth query kept so the xstats code compiles (the
 * original comment below marks it as a dummy).
 * NOTE(review): the return-type line, function body, and closing brace are
 * on lines omitted from this extract.
 */
75 dlb_get_queue_depth(struct dlb_eventdev *dlb,
76 struct dlb_eventdev_queue *queue)
78 /* DUMMY FOR NOW So "xstats" patch compiles */
/* Query the hardware for its available resources and propagate the results
 * into (a) the evdev capability struct (evdev_dlb_default_info), replacing
 * the compile-time defaults, and (b) the per-handle hw_rsrc_max snapshot
 * used later when creating the scheduling domain.
 * NOTE(review): the return-type line, the "if (ret) { ... return ... }"
 * error branch after the query, and the final return/closing brace are on
 * lines omitted from this extract.
 */
86 dlb_hw_query_resources(struct dlb_eventdev *dlb)
88 struct dlb_hw_dev *handle = &dlb->qm_instance;
89 struct dlb_hw_resource_info *dlb_info = &handle->info;
92 ret = dlb_iface_get_num_resources(handle,
93 &dlb->hw_rsrc_query_results);
95 DLB_LOG_ERR("get dlb num resources, err=%d\n", ret);
99 /* Complete filling in device resource info returned to evdev app,
100 * overriding any default values.
101 * The capabilities (CAPs) were set at compile time.
104 evdev_dlb_default_info.max_event_queues =
105 dlb->hw_rsrc_query_results.num_ldb_queues;
107 evdev_dlb_default_info.max_event_ports =
108 dlb->hw_rsrc_query_results.num_ldb_ports;
110 evdev_dlb_default_info.max_num_events =
111 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
113 /* Save off values used when creating the scheduling domain. */
115 handle->info.num_sched_domains =
116 dlb->hw_rsrc_query_results.num_sched_domains;
118 handle->info.hw_rsrc_max.nb_events_limit =
119 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits;
/* Total queues = load-balanced queues + directed ports (each directed
 * port implies its own directed queue).
 */
121 handle->info.hw_rsrc_max.num_queues =
122 dlb->hw_rsrc_query_results.num_ldb_queues +
123 dlb->hw_rsrc_query_results.num_dir_ports;
125 handle->info.hw_rsrc_max.num_ldb_queues =
126 dlb->hw_rsrc_query_results.num_ldb_queues;
128 handle->info.hw_rsrc_max.num_ldb_ports =
129 dlb->hw_rsrc_query_results.num_ldb_ports;
131 handle->info.hw_rsrc_max.num_dir_ports =
132 dlb->hw_rsrc_query_results.num_dir_ports;
134 handle->info.hw_rsrc_max.reorder_window_size =
135 dlb->hw_rsrc_query_results.num_hist_list_entries;
/* Publish the populated hw_rsrc_max back into the handle's info copy. */
137 rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info));
142 /* Wrapper for string to int conversion. Substituted for atoi(...), which is
 * unsafe — atoi() gives no way to report a parse error or out-of-range
 * value. (The rest of this comment falls on lines omitted from this
 * extract.)
 */
145 #define DLB_BASE_10 10 /* radix passed to strtol() in dlb_string_to_int() */
/* Safe atoi() replacement: convert a base-10 string to int via strtol(),
 * rejecting NULL arguments, strings with no leading digits (endstr == str),
 * and values that do not fit in an int.
 * NOTE(review): the return-type line, local declarations, return
 * statements, and closing brace are on lines omitted from this extract.
 */
148 dlb_string_to_int(int *result, const char *str)
153 if (str == NULL || result == NULL)
157 ret = strtol(str, &endstr, DLB_BASE_10);
161 /* long int and int may be different width for some architectures */
162 if (ret < INT_MIN || ret > INT_MAX || endstr == str)
/* rte_kvargs callback: parse the "numa_node" devarg into the int pointed
 * to by opaque, rejecting socket ids above RTE_MAX_NUMA_NODES.
 * NOTE(review): the NULL-argument check, error returns, and closing brace
 * are on lines omitted from this extract.
 */
170 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
172 int *socket_id = opaque;
175 ret = dlb_string_to_int(socket_id, value);
179 if (*socket_id > RTE_MAX_NUMA_NODES)
/* rte_kvargs callback: parse the "max_num_events" devarg into *opaque and
 * range-check it against [0, DLB_MAX_NUM_LDB_CREDITS].
 * NOTE(review): return statements and the closing brace are on lines
 * omitted from this extract.
 */
186 set_max_num_events(const char *key __rte_unused,
190 int *max_num_events = opaque;
193 if (value == NULL || opaque == NULL) {
194 DLB_LOG_ERR("NULL pointer\n");
198 ret = dlb_string_to_int(max_num_events, value);
202 if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
203 DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
204 DLB_MAX_NUM_LDB_CREDITS);
/* rte_kvargs callback: parse the "num_dir_credits" devarg into *opaque and
 * range-check it against [0, DLB_MAX_NUM_DIR_CREDITS].
 * NOTE(review): return statements and the closing brace are on lines
 * omitted from this extract.
 */
212 set_num_dir_credits(const char *key __rte_unused,
216 int *num_dir_credits = opaque;
219 if (value == NULL || opaque == NULL) {
220 DLB_LOG_ERR("NULL pointer\n");
224 ret = dlb_string_to_int(num_dir_credits, value);
228 if (*num_dir_credits < 0 ||
229 *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
230 DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
231 DLB_MAX_NUM_DIR_CREDITS);
/* rte_kvargs callback: parse the "dev_id" devarg into *opaque. Unlike the
 * other numeric callbacks, no range check is visible here beyond what
 * dlb_string_to_int() enforces.
 * NOTE(review): return statements and the closing brace are on lines
 * omitted from this extract.
 */
239 set_dev_id(const char *key __rte_unused,
243 int *dev_id = opaque;
246 if (value == NULL || opaque == NULL) {
247 DLB_LOG_ERR("NULL pointer\n");
251 ret = dlb_string_to_int(dev_id, value);
/* rte_kvargs callback: the "defer_sched" devarg only accepts the literal
 * value "on"; anything else is rejected with an error log. The comparison
 * uses strncmp(value, "on", 2), so only the first two characters are
 * checked.
 * NOTE(review): the assignment to *defer_sched, return statements, and the
 * closing brace are on lines omitted from this extract.
 */
259 set_defer_sched(const char *key __rte_unused,
263 int *defer_sched = opaque;
265 if (value == NULL || opaque == NULL) {
266 DLB_LOG_ERR("NULL pointer\n");
270 if (strncmp(value, "on", 2) != 0) {
271 DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
/* rte_kvargs callback: parse the "atm_inflights" devarg into *opaque and
 * range-check it against [0, DLB_MAX_NUM_ATM_INFLIGHTS].
 * NOTE(review): return statements and the closing brace are on lines
 * omitted from this extract.
 */
282 set_num_atm_inflights(const char *key __rte_unused,
286 int *num_atm_inflights = opaque;
289 if (value == NULL || opaque == NULL) {
290 DLB_LOG_ERR("NULL pointer\n");
294 ret = dlb_string_to_int(num_atm_inflights, value);
298 if (*num_atm_inflights < 0 ||
299 *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
300 DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
301 DLB_MAX_NUM_ATM_INFLIGHTS);
/* Install the PMD's eventdev ops table on the device. The ops struct is
 * function-local static so its address remains valid after return.
 * NOTE(review): additional .xxx ops entries (config/setup/enqueue/dequeue,
 * etc.) and the function's closing brace appear to be on lines omitted
 * from this extract — only the dump/xstats entries are visible.
 */
309 dlb_entry_points_init(struct rte_eventdev *dev)
311 static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
312 .dump = dlb_eventdev_dump,
313 .xstats_get = dlb_eventdev_xstats_get,
314 .xstats_get_names = dlb_eventdev_xstats_get_names,
315 .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
316 .xstats_reset = dlb_eventdev_xstats_reset,
319 /* Expose PMD's eventdev interface */
320 dev->dev_ops = &dlb_eventdev_entry_ops;
/* Primary-process probe: copy parsed devargs into the device-private
 * struct, then bring the device up in sequence — open the hardware
 * interface, read the device revision, query hardware resources, read the
 * CQ poll mode, initialize xstats, init the resource spinlock and
 * low-level I/O, and finally install the eventdev ops.
 * NOTE(review): each step's "if (err) { ... return err; }" error branch and
 * the final return/closing brace are on lines omitted from this extract,
 * as is the "name" parameter declaration used below.
 */
324 dlb_primary_eventdev_probe(struct rte_eventdev *dev,
326 struct dlb_devargs *dlb_args)
328 struct dlb_eventdev *dlb;
331 dlb = dev->data->dev_private;
333 dlb->event_dev = dev; /* backlink */
335 evdev_dlb_default_info.driver_name = name;
/* Apply devarg overrides before talking to hardware. */
337 dlb->max_num_events_override = dlb_args->max_num_events;
338 dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
339 dlb->defer_sched = dlb_args->defer_sched;
340 dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
342 /* Open the interface.
343 * For vdev mode, this means open the dlb kernel module.
345 err = dlb_iface_open(&dlb->qm_instance, name);
347 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
352 err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
354 DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
359 err = dlb_hw_query_resources(dlb);
361 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
365 err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
367 DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
371 /* Complete xtstats runtime initialization */
372 err = dlb_xstats_init(dlb);
374 DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
378 rte_spinlock_init(&dlb->qm_instance.resource_lock);
380 dlb_iface_low_level_io_init(dlb);
382 dlb_entry_points_init(dev);
/* Secondary-process probe: a reduced version of the primary probe — open
 * the hardware interface, query resources, init low-level I/O, and install
 * the eventdev ops. No devarg overrides, xstats init, or spinlock init are
 * visible here (shared state is presumably owned by the primary process —
 * confirm against the primary probe).
 * NOTE(review): error branches, the "name" parameter declaration, and the
 * final return/closing brace are on lines omitted from this extract.
 */
388 dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
391 struct dlb_eventdev *dlb;
394 dlb = dev->data->dev_private;
396 evdev_dlb_default_info.driver_name = name;
398 err = dlb_iface_open(&dlb->qm_instance, name);
400 DLB_LOG_ERR("could not open event hardware device, err=%d\n",
405 err = dlb_hw_query_resources(dlb);
407 DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
411 dlb_iface_low_level_io_init(dlb);
413 dlb_entry_points_init(dev);
/* Parse the device's devargs string into dlb_args using rte_kvargs. Each
 * recognized key is dispatched to its set_* callback; on any callback
 * failure the kvlist is freed and an error is returned. An empty/NULL
 * params string is accepted as "no arguments".
 * NOTE(review): several lines are omitted from this extract — parts of the
 * args[] key list, the "if (ret != 0)" guards before each error log, the
 * callback argument of the DEV_ID_ARG call, and the return statements and
 * closing braces.
 */
419 dlb_parse_params(const char *params,
421 struct dlb_devargs *dlb_args)
424 static const char * const args[] = { NUMA_NODE_ARG,
429 DLB_NUM_ATM_INFLIGHTS_ARG,
432 if (params && params[0] != '\0') {
433 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
/* Unknown keys are logged and ignored rather than treated as fatal. */
435 if (kvlist == NULL) {
436 DLB_LOG_INFO("Ignoring unsupported parameters when creating device '%s'\n",
439 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
441 &dlb_args->socket_id);
443 DLB_LOG_ERR("%s: Error parsing numa node parameter",
445 rte_kvargs_free(kvlist);
449 ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
451 &dlb_args->max_num_events);
453 DLB_LOG_ERR("%s: Error parsing max_num_events parameter",
455 rte_kvargs_free(kvlist);
459 ret = rte_kvargs_process(kvlist,
462 &dlb_args->num_dir_credits_override);
464 DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter",
466 rte_kvargs_free(kvlist);
470 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
474 DLB_LOG_ERR("%s: Error parsing dev_id parameter",
476 rte_kvargs_free(kvlist);
480 ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
482 &dlb_args->defer_sched,
484 DLB_LOG_ERR("%s: Error parsing defer_sched parameter",
486 rte_kvargs_free(kvlist);
490 ret = rte_kvargs_process(kvlist,
491 DLB_NUM_ATM_INFLIGHTS_ARG,
492 set_num_atm_inflights,
493 &dlb_args->num_atm_inflights);
495 DLB_LOG_ERR("%s: Error parsing atm_inflights parameter",
497 rte_kvargs_free(kvlist);
/* All keys processed successfully: release the parsed kvlist. */
501 rte_kvargs_free(kvlist);
506 RTE_LOG_REGISTER(eventdev_dlb_log_level, pmd.event.dlb, NOTICE);