1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
14 #include <sys/fcntl.h>
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <rte_eventdev_pmd.h>
25 #include <rte_kvargs.h>
27 #include <rte_malloc.h>
29 #include <rte_prefetch.h>
31 #include <rte_string_fns.h>
33 #include "dlb2_priv.h"
34 #include "dlb2_iface.h"
35 #include "dlb2_inline_fns.h"
38 * Resources exposed to eventdev. Some values overridden at runtime using
39 * values returned by the DLB kernel driver.
/* Compile-time guard: the queue-count member this feeds is 8 bits wide
 * (per the error text below), so the configured maximum must fit in
 * UINT8_MAX.
 */
41 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
42 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
/* Default eventdev device info for this PMD.  driver_name,
 * max_event_queues, max_event_ports and max_num_events are placeholders:
 * they are overwritten at probe time from the hardware resource query
 * (see dlb2_hw_query_resources()).  The capability flags are fixed at
 * compile time.
 */
44 static struct rte_event_dev_info evdev_dlb2_default_info = {
45 .driver_name = "", /* probe will set */
46 .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
47 .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
/* Advertise the smaller of the eventdev build-time limit and the
 * hardware's load-balanced queue count.
 */
48 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
49 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
51 .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
53 .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
54 .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
55 .max_event_priority_levels = DLB2_QID_PRIORITIES,
56 .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
57 .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
58 .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
59 .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
60 .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
61 .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
62 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
63 RTE_EVENT_DEV_CAP_EVENT_QOS |
64 RTE_EVENT_DEV_CAP_BURST_MODE |
65 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
66 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
67 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
/* Per-port state, one entry per (port id, port type) pair.
 * NOTE(review): the type name suggests this holds process-local port
 * mapping data (hence a plain global, one copy per process) — confirm
 * against the struct definition in dlb2_priv.h.
 */
70 struct process_local_port_data
71 dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
74 * DUMMY - added so that xstats path will compile/link.
75 * Will be replaced by real version in a subsequent
/* Stub queue-depth accessor, present only so the xstats code has a symbol
 * to call; parameters are accepted but not meaningfully used yet.
 */
79 dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
80 struct dlb2_eventdev_queue *queue)
88 /* override defaults with value(s) provided on command line */
/* Apply per-queue depth thresholds collected from the qid_depth_thresh
 * devarg.  A zero entry means "not specified on the command line", so that
 * queue keeps its default depth_threshold.
 */
90 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
91 int *qid_depth_thresholds)
95 for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
96 if (qid_depth_thresholds[q] != 0)
97 dlb2->ev_queues[q].depth_threshold =
98 qid_depth_thresholds[q];
/* Query the kernel driver for the resources provisioned to this device and
 * propagate them into (a) the advertised eventdev defaults
 * (evdev_dlb2_default_info) and (b) handle->info.hw_rsrc_max, the limits
 * later used when creating the scheduling domain.  Logs and returns the
 * driver error code if the resource ioctl fails.
 */
103 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
105 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
106 struct dlb2_hw_resource_info *dlb2_info = &handle->info;
109 /* Query driver resources provisioned for this device */
111 ret = dlb2_iface_get_num_resources(handle,
112 &dlb2->hw_rsrc_query_results);
114 DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
118 /* Complete filling in device resource info returned to evdev app,
119 * overriding any default values.
120 * The capabilities (CAPs) were set at compile time.
123 evdev_dlb2_default_info.max_event_queues =
124 dlb2->hw_rsrc_query_results.num_ldb_queues;
126 evdev_dlb2_default_info.max_event_ports =
127 dlb2->hw_rsrc_query_results.num_ldb_ports;
129 evdev_dlb2_default_info.max_num_events =
130 dlb2->hw_rsrc_query_results.num_ldb_credits;
132 /* Save off values used when creating the scheduling domain. */
134 handle->info.num_sched_domains =
135 dlb2->hw_rsrc_query_results.num_sched_domains;
137 handle->info.hw_rsrc_max.nb_events_limit =
138 dlb2->hw_rsrc_query_results.num_ldb_credits;
/* NOTE(review): total queues = LDB queues + DIR ports; presumably each
 * directed port has an implicit directed queue — confirm.
 */
140 handle->info.hw_rsrc_max.num_queues =
141 dlb2->hw_rsrc_query_results.num_ldb_queues +
142 dlb2->hw_rsrc_query_results.num_dir_ports;
144 handle->info.hw_rsrc_max.num_ldb_queues =
145 dlb2->hw_rsrc_query_results.num_ldb_queues;
147 handle->info.hw_rsrc_max.num_ldb_ports =
148 dlb2->hw_rsrc_query_results.num_ldb_ports;
150 handle->info.hw_rsrc_max.num_dir_ports =
151 dlb2->hw_rsrc_query_results.num_dir_ports;
153 handle->info.hw_rsrc_max.reorder_window_size =
154 dlb2->hw_rsrc_query_results.num_hist_list_entries;
/* Publish a copy of the computed maxima through the caller-visible info
 * pointer.
 */
156 rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
161 #define DLB2_BASE_10 10
/* Convert a base-10 string to an int stored in *result.  Fails on NULL
 * arguments, on input with no leading digits (endptr == str), and on values
 * outside int's range.
 * NOTE(review): no errno/ERANGE check after strtol is visible here; on
 * ILP32 targets (long == int) a saturated LONG_MAX/LONG_MIN result would
 * pass the range test undetected — confirm this is acceptable.
 */
164 dlb2_string_to_int(int *result, const char *str)
169 if (str == NULL || result == NULL)
173 ret = strtol(str, &endptr, DLB2_BASE_10);
177 /* long int and int may be different width for some architectures */
178 if (ret < INT_MIN || ret > INT_MAX || endptr == str)
/* rte_kvargs callback: parse the numa_node devarg into the caller-supplied
 * socket id (opaque points at an int).
 */
186 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
188 int *socket_id = opaque;
191 ret = dlb2_string_to_int(socket_id, value);
/* NOTE(review): '>' permits socket_id == RTE_MAX_NUMA_NODES; valid node ids
 * are 0..RTE_MAX_NUMA_NODES-1, so '>=' may be intended — confirm.
 */
195 if (*socket_id > RTE_MAX_NUMA_NODES)
/* rte_kvargs callback: parse the max_num_events devarg (opaque points at an
 * int) and bound it to [0, DLB2_MAX_NUM_LDB_CREDITS].
 */
201 set_max_num_events(const char *key __rte_unused,
205 int *max_num_events = opaque;
208 if (value == NULL || opaque == NULL) {
209 DLB2_LOG_ERR("NULL pointer\n");
213 ret = dlb2_string_to_int(max_num_events, value);
217 if (*max_num_events < 0 || *max_num_events >
218 DLB2_MAX_NUM_LDB_CREDITS) {
219 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
220 DLB2_MAX_NUM_LDB_CREDITS);
/* rte_kvargs callback: parse the num_dir_credits devarg (opaque points at
 * an int) and bound it to [0, DLB2_MAX_NUM_DIR_CREDITS].
 */
228 set_num_dir_credits(const char *key __rte_unused,
232 int *num_dir_credits = opaque;
235 if (value == NULL || opaque == NULL) {
236 DLB2_LOG_ERR("NULL pointer\n");
240 ret = dlb2_string_to_int(num_dir_credits, value);
244 if (*num_dir_credits < 0 ||
245 *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
246 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
247 DLB2_MAX_NUM_DIR_CREDITS);
/* rte_kvargs callback: parse the dev_id devarg into an int (opaque points
 * at the destination).  Range validation beyond int's limits is not done in
 * the code visible here.
 */
255 set_dev_id(const char *key __rte_unused,
259 int *dev_id = opaque;
262 if (value == NULL || opaque == NULL) {
263 DLB2_LOG_ERR("NULL pointer\n");
267 ret = dlb2_string_to_int(dev_id, value);
/* rte_kvargs callback: parse the class-of-service devarg.  Accepts
 * DLB2_COS_DEFAULT or an explicit class in [DLB2_COS_0, DLB2_COS_3].
 * Parses into a local first so *cos_id is presumably only written once the
 * value validates — assignment not shown in this excerpt, confirm.
 */
275 set_cos(const char *key __rte_unused,
279 enum dlb2_cos *cos_id = opaque;
283 if (value == NULL || opaque == NULL) {
284 DLB2_LOG_ERR("NULL pointer\n");
288 ret = dlb2_string_to_int(&x, value);
292 if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
294 "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
/* rte_kvargs callback: parse the qid_depth_thresh devarg into the
 * per-queue threshold table (opaque points at a
 * struct dlb2_qid_depth_thresholds).  Validates both the queue range and
 * the threshold value before filling val[first..last].
 */
306 set_qid_depth_thresh(const char *key __rte_unused,
310 struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
311 int first, last, thresh, i;
313 if (value == NULL || opaque == NULL) {
314 DLB2_LOG_ERR("NULL pointer\n");
318 /* command line override may take one of the following 3 forms:
319 * qid_depth_thresh=all:<threshold_value> ... all queues
320 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
321 * qid_depth_thresh=qid:<threshold_value> ... just one queue
323 if (sscanf(value, "all:%d", &thresh) == 1) {
325 last = DLB2_MAX_NUM_QUEUES - 1;
326 } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
327 /* we have everything we need */
328 } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
331 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
/* Reject empty/inverted ranges and out-of-bounds queue ids. */
335 if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
336 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
340 if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
341 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
342 DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
346 for (i = first; i <= last; i++)
347 qid_thresh->val[i] = thresh; /* indexed by qid */
/* Install this PMD's eventdev ops table on the device.  The ops struct is
 * function-local but declared static, so the pointer stored in
 * dev->dev_ops stays valid after this function returns.
 */
353 dlb2_entry_points_init(struct rte_eventdev *dev)
355 /* Expose PMD's eventdev interface */
356 static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
357 .dump = dlb2_eventdev_dump,
358 .xstats_get = dlb2_eventdev_xstats_get,
359 .xstats_get_names = dlb2_eventdev_xstats_get_names,
360 .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
361 .xstats_reset = dlb2_eventdev_xstats_reset,
364 dev->dev_ops = &dlb2_eventdev_entry_ops;
/* Primary-process probe: open the hardware interface, query device version
 * and provisioned resources, run hardware and xstats initialization, then
 * wire up the eventdev entry points and apply the devarg-provided
 * overrides (max_num_events, num_dir_credits, cos_id, queue depth
 * thresholds).  Each step logs and propagates the error code on failure.
 */
368 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
370 struct dlb2_devargs *dlb2_args)
372 struct dlb2_eventdev *dlb2;
375 dlb2 = dev->data->dev_private;
377 dlb2->event_dev = dev; /* backlink */
379 evdev_dlb2_default_info.driver_name = name;
/* Stash command-line overrides consumed later at configure time. */
381 dlb2->max_num_events_override = dlb2_args->max_num_events;
382 dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
383 dlb2->qm_instance.cos_id = dlb2_args->cos_id;
385 err = dlb2_iface_open(&dlb2->qm_instance, name);
387 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
392 err = dlb2_iface_get_device_version(&dlb2->qm_instance,
395 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
400 err = dlb2_hw_query_resources(dlb2);
402 DLB2_LOG_ERR("get resources err=%d for %s\n",
407 dlb2_iface_hardware_init(&dlb2->qm_instance);
409 err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
411 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
416 /* Complete xtstats runtime initialization */
417 err = dlb2_xstats_init(dlb2);
419 DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
423 rte_spinlock_init(&dlb2->qm_instance.resource_lock);
425 dlb2_iface_low_level_io_init();
427 dlb2_entry_points_init(dev);
429 dlb2_init_queue_depth_thresholds(dlb2,
430 dlb2_args->qid_depth_thresholds.val);
/* Secondary-process probe: attach to an already-probed device.  Only opens
 * the interface, re-queries resources, and installs the entry points —
 * unlike the primary path, no hardware, xstats, or devarg-override
 * initialization is performed here.
 */
436 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
439 struct dlb2_eventdev *dlb2;
442 dlb2 = dev->data->dev_private;
444 evdev_dlb2_default_info.driver_name = name;
446 err = dlb2_iface_open(&dlb2->qm_instance, name);
448 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
453 err = dlb2_hw_query_resources(dlb2);
455 DLB2_LOG_ERR("get resources err=%d for %s\n",
460 dlb2_iface_low_level_io_init();
462 dlb2_entry_points_init(dev);
/* Parse the device's devargs string, dispatching each recognized key
 * (numa_node, max_num_events, num_dir_credits, dev_id, qid_depth_thresh,
 * cos) to its set_* callback, which writes into the corresponding
 * dlb2_args field.  The kvlist is freed on every path, including each
 * per-key error path.  An empty/NULL params string is not an error.
 */
468 dlb2_parse_params(const char *params,
470 struct dlb2_devargs *dlb2_args)
473 static const char * const args[] = { NUMA_NODE_ARG,
475 DLB2_NUM_DIR_CREDITS,
477 DLB2_QID_DEPTH_THRESH_ARG,
481 if (params != NULL && params[0] != '\0') {
482 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
/* Unrecognized keys are not fatal: log and continue with defaults. */
484 if (kvlist == NULL) {
486 "Ignoring unsupported parameters when creating device '%s'\n",
489 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
491 &dlb2_args->socket_id);
493 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
495 rte_kvargs_free(kvlist);
499 ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
501 &dlb2_args->max_num_events);
503 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
505 rte_kvargs_free(kvlist);
509 ret = rte_kvargs_process(kvlist,
510 DLB2_NUM_DIR_CREDITS,
512 &dlb2_args->num_dir_credits_override);
514 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
516 rte_kvargs_free(kvlist);
520 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
524 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
526 rte_kvargs_free(kvlist);
530 ret = rte_kvargs_process(
532 DLB2_QID_DEPTH_THRESH_ARG,
533 set_qid_depth_thresh,
534 &dlb2_args->qid_depth_thresholds);
536 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
538 rte_kvargs_free(kvlist);
542 ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
546 DLB2_LOG_ERR("%s: Error parsing cos parameter",
548 rte_kvargs_free(kvlist);
552 rte_kvargs_free(kvlist);
/* Register the PMD's dynamic log type (pmd.event.dlb2), default level
 * NOTICE.
 */
557 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);