1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
14 #include <sys/fcntl.h>
16 #include <rte_common.h>
17 #include <rte_config.h>
18 #include <rte_cycles.h>
19 #include <rte_debug.h>
21 #include <rte_errno.h>
22 #include <rte_eventdev.h>
23 #include <rte_eventdev_pmd.h>
25 #include <rte_kvargs.h>
27 #include <rte_malloc.h>
29 #include <rte_prefetch.h>
31 #include <rte_string_fns.h>
33 #include "dlb2_priv.h"
34 #include "dlb2_iface.h"
35 #include "dlb2_inline_fns.h"
38 * Resources exposed to eventdev. Some values overridden at runtime using
39 * values returned by the DLB kernel driver.
41 #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
42 #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
44 static struct rte_event_dev_info evdev_dlb2_default_info = {
45 .driver_name = "", /* probe will set */
46 .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
47 .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
48 #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
49 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
51 .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
53 .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
54 .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
55 .max_event_priority_levels = DLB2_QID_PRIORITIES,
56 .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
57 .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
58 .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
59 .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
60 .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
61 .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
62 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
63 RTE_EVENT_DEV_CAP_EVENT_QOS |
64 RTE_EVENT_DEV_CAP_BURST_MODE |
65 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
66 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
67 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
/* Per-process port state, indexed by physical port id and port type
 * (load-balanced vs. directed).  NOTE(review): "process-local" semantics
 * inferred from the type name -- confirm against dlb2_priv.h.
 */
struct process_local_port_data
dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
73 /* override defaults with value(s) provided on command line */
75 dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
76 int *qid_depth_thresholds)
80 for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
81 if (qid_depth_thresholds[q] != 0)
82 dlb2->ev_queues[q].depth_threshold =
83 qid_depth_thresholds[q];
88 dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
90 struct dlb2_hw_dev *handle = &dlb2->qm_instance;
91 struct dlb2_hw_resource_info *dlb2_info = &handle->info;
94 /* Query driver resources provisioned for this device */
96 ret = dlb2_iface_get_num_resources(handle,
97 &dlb2->hw_rsrc_query_results);
99 DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
103 /* Complete filling in device resource info returned to evdev app,
104 * overriding any default values.
105 * The capabilities (CAPs) were set at compile time.
108 evdev_dlb2_default_info.max_event_queues =
109 dlb2->hw_rsrc_query_results.num_ldb_queues;
111 evdev_dlb2_default_info.max_event_ports =
112 dlb2->hw_rsrc_query_results.num_ldb_ports;
114 evdev_dlb2_default_info.max_num_events =
115 dlb2->hw_rsrc_query_results.num_ldb_credits;
117 /* Save off values used when creating the scheduling domain. */
119 handle->info.num_sched_domains =
120 dlb2->hw_rsrc_query_results.num_sched_domains;
122 handle->info.hw_rsrc_max.nb_events_limit =
123 dlb2->hw_rsrc_query_results.num_ldb_credits;
125 handle->info.hw_rsrc_max.num_queues =
126 dlb2->hw_rsrc_query_results.num_ldb_queues +
127 dlb2->hw_rsrc_query_results.num_dir_ports;
129 handle->info.hw_rsrc_max.num_ldb_queues =
130 dlb2->hw_rsrc_query_results.num_ldb_queues;
132 handle->info.hw_rsrc_max.num_ldb_ports =
133 dlb2->hw_rsrc_query_results.num_ldb_ports;
135 handle->info.hw_rsrc_max.num_dir_ports =
136 dlb2->hw_rsrc_query_results.num_dir_ports;
138 handle->info.hw_rsrc_max.reorder_window_size =
139 dlb2->hw_rsrc_query_results.num_hist_list_entries;
141 rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
#define DLB2_BASE_10 10

/* Parse a base-10 integer from @str into @result.
 *
 * Returns 0 on success; -EINVAL on NULL arguments, non-numeric input, or a
 * value outside the range of int; -errno if strtol itself reports an error.
 */
static int
dlb2_string_to_int(int *result, const char *str)
{
	long ret;
	char *endptr;

	if (str == NULL || result == NULL)
		return -EINVAL;

	errno = 0;
	ret = strtol(str, &endptr, DLB2_BASE_10);
	if (errno)
		return -errno;

	/* long int and int may be different width for some architectures */
	if (ret < INT_MIN || ret > INT_MAX || endptr == str)
		return -EINVAL;

	*result = ret;
	return 0;
}
171 set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
173 int *socket_id = opaque;
176 ret = dlb2_string_to_int(socket_id, value);
180 if (*socket_id > RTE_MAX_NUMA_NODES)
186 set_max_num_events(const char *key __rte_unused,
190 int *max_num_events = opaque;
193 if (value == NULL || opaque == NULL) {
194 DLB2_LOG_ERR("NULL pointer\n");
198 ret = dlb2_string_to_int(max_num_events, value);
202 if (*max_num_events < 0 || *max_num_events >
203 DLB2_MAX_NUM_LDB_CREDITS) {
204 DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
205 DLB2_MAX_NUM_LDB_CREDITS);
213 set_num_dir_credits(const char *key __rte_unused,
217 int *num_dir_credits = opaque;
220 if (value == NULL || opaque == NULL) {
221 DLB2_LOG_ERR("NULL pointer\n");
225 ret = dlb2_string_to_int(num_dir_credits, value);
229 if (*num_dir_credits < 0 ||
230 *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
231 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
232 DLB2_MAX_NUM_DIR_CREDITS);
240 set_dev_id(const char *key __rte_unused,
244 int *dev_id = opaque;
247 if (value == NULL || opaque == NULL) {
248 DLB2_LOG_ERR("NULL pointer\n");
252 ret = dlb2_string_to_int(dev_id, value);
260 set_cos(const char *key __rte_unused,
264 enum dlb2_cos *cos_id = opaque;
268 if (value == NULL || opaque == NULL) {
269 DLB2_LOG_ERR("NULL pointer\n");
273 ret = dlb2_string_to_int(&x, value);
277 if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
279 "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
291 set_qid_depth_thresh(const char *key __rte_unused,
295 struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
296 int first, last, thresh, i;
298 if (value == NULL || opaque == NULL) {
299 DLB2_LOG_ERR("NULL pointer\n");
303 /* command line override may take one of the following 3 forms:
304 * qid_depth_thresh=all:<threshold_value> ... all queues
305 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
306 * qid_depth_thresh=qid:<threshold_value> ... just one queue
308 if (sscanf(value, "all:%d", &thresh) == 1) {
310 last = DLB2_MAX_NUM_QUEUES - 1;
311 } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
312 /* we have everything we need */
313 } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
316 DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
320 if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
321 DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
325 if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
326 DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
327 DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
331 for (i = first; i <= last; i++)
332 qid_thresh->val[i] = thresh; /* indexed by qid */
/* Install this PMD's eventdev ops on @dev.  NOTE(review): the function body
 * is not visible in this view of the file -- confirm the registered ops
 * against the full source before relying on this description.
 */
dlb2_entry_points_init(struct rte_eventdev *dev)

	/* Eventdev PMD entry points */
346 dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
348 struct dlb2_devargs *dlb2_args)
350 struct dlb2_eventdev *dlb2;
353 dlb2 = dev->data->dev_private;
355 dlb2->event_dev = dev; /* backlink */
357 evdev_dlb2_default_info.driver_name = name;
359 dlb2->max_num_events_override = dlb2_args->max_num_events;
360 dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
361 dlb2->qm_instance.cos_id = dlb2_args->cos_id;
363 err = dlb2_iface_open(&dlb2->qm_instance, name);
365 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
370 err = dlb2_iface_get_device_version(&dlb2->qm_instance,
373 DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
378 err = dlb2_hw_query_resources(dlb2);
380 DLB2_LOG_ERR("get resources err=%d for %s\n",
385 dlb2_iface_hardware_init(&dlb2->qm_instance);
387 err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
389 DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
394 rte_spinlock_init(&dlb2->qm_instance.resource_lock);
396 dlb2_iface_low_level_io_init();
398 dlb2_entry_points_init(dev);
400 dlb2_init_queue_depth_thresholds(dlb2,
401 dlb2_args->qid_depth_thresholds.val);
407 dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
410 struct dlb2_eventdev *dlb2;
413 dlb2 = dev->data->dev_private;
415 evdev_dlb2_default_info.driver_name = name;
417 err = dlb2_iface_open(&dlb2->qm_instance, name);
419 DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
424 err = dlb2_hw_query_resources(dlb2);
426 DLB2_LOG_ERR("get resources err=%d for %s\n",
431 dlb2_iface_low_level_io_init();
433 dlb2_entry_points_init(dev);
439 dlb2_parse_params(const char *params,
441 struct dlb2_devargs *dlb2_args)
444 static const char * const args[] = { NUMA_NODE_ARG,
446 DLB2_NUM_DIR_CREDITS,
448 DLB2_QID_DEPTH_THRESH_ARG,
452 if (params != NULL && params[0] != '\0') {
453 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
455 if (kvlist == NULL) {
457 "Ignoring unsupported parameters when creating device '%s'\n",
460 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
462 &dlb2_args->socket_id);
464 DLB2_LOG_ERR("%s: Error parsing numa node parameter",
466 rte_kvargs_free(kvlist);
470 ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
472 &dlb2_args->max_num_events);
474 DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
476 rte_kvargs_free(kvlist);
480 ret = rte_kvargs_process(kvlist,
481 DLB2_NUM_DIR_CREDITS,
483 &dlb2_args->num_dir_credits_override);
485 DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
487 rte_kvargs_free(kvlist);
491 ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
495 DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
497 rte_kvargs_free(kvlist);
501 ret = rte_kvargs_process(
503 DLB2_QID_DEPTH_THRESH_ARG,
504 set_qid_depth_thresh,
505 &dlb2_args->qid_depth_thresholds);
507 DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
509 rte_kvargs_free(kvlist);
513 ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
517 DLB2_LOG_ERR("%s: Error parsing cos parameter",
519 rte_kvargs_free(kvlist);
523 rte_kvargs_free(kvlist);
528 RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);