/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <assert.h>
#include <errno.h>
#include <limits.h> /* INT_MIN/INT_MAX, used by dlb2_string_to_int() */
#include <nmmintrin.h>
#include <pthread.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/fcntl.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd.h>
#include <rte_io.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_ring.h>
#include <rte_string_fns.h>

#include "dlb2_priv.h"
#include "dlb2_iface.h"
#include "dlb2_inline_fns.h"

/*
 * Resources exposed to eventdev. Some values are overridden at runtime
 * using values returned by the DLB2 kernel driver.
 */
#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
#endif
static struct rte_event_dev_info evdev_dlb2_default_info = {
	.driver_name = "", /* probe will set */
	.min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
	.max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
#else
	.max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
#endif
	.max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
	.max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
	.max_event_priority_levels = DLB2_QID_PRIORITIES,
	.max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
	.max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
	.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
	.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
	.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
	.max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
	.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
			  RTE_EVENT_DEV_CAP_EVENT_QOS |
			  RTE_EVENT_DEV_CAP_BURST_MODE |
			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
};

struct process_local_port_data
dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];

/*
 * DUMMY - added so that xstats path will compile/link.
 * Will be replaced by real version in a subsequent
 * patch.
 */
uint32_t
dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
		     struct dlb2_eventdev_queue *queue)
{
	RTE_SET_USED(dlb2);
	RTE_SET_USED(queue);

	return 0;
}

static void
dlb2_free_qe_mem(struct dlb2_port *qm_port)
{
	if (qm_port == NULL)
		return;

	rte_free(qm_port->qe4);
	qm_port->qe4 = NULL;

	rte_free(qm_port->int_arm_qe);
	qm_port->int_arm_qe = NULL;

	rte_free(qm_port->consume_qe);
	qm_port->consume_qe = NULL;

	rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
	dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
}

/* override defaults with value(s) provided on command line */
static void
dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
				 int *qid_depth_thresholds)
{
	int q;

	for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
		if (qid_depth_thresholds[q] != 0)
			dlb2->ev_queues[q].depth_threshold =
				qid_depth_thresholds[q];
	}
}

static int
dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
{
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_hw_resource_info *dlb2_info = &handle->info;
	int ret;

	/* Query driver resources provisioned for this device */

	ret = dlb2_iface_get_num_resources(handle,
					   &dlb2->hw_rsrc_query_results);
	if (ret) {
		DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
		return ret;
	}

	/* Complete filling in device resource info returned to evdev app,
	 * overriding any default values.
	 * The capabilities (CAPs) were set at compile time.
	 */

	evdev_dlb2_default_info.max_event_queues =
		dlb2->hw_rsrc_query_results.num_ldb_queues;

	evdev_dlb2_default_info.max_event_ports =
		dlb2->hw_rsrc_query_results.num_ldb_ports;

	evdev_dlb2_default_info.max_num_events =
		dlb2->hw_rsrc_query_results.num_ldb_credits;

	/* Save off values used when creating the scheduling domain. */

	handle->info.num_sched_domains =
		dlb2->hw_rsrc_query_results.num_sched_domains;

	handle->info.hw_rsrc_max.nb_events_limit =
		dlb2->hw_rsrc_query_results.num_ldb_credits;

	handle->info.hw_rsrc_max.num_queues =
		dlb2->hw_rsrc_query_results.num_ldb_queues +
		dlb2->hw_rsrc_query_results.num_dir_ports;

	handle->info.hw_rsrc_max.num_ldb_queues =
		dlb2->hw_rsrc_query_results.num_ldb_queues;

	handle->info.hw_rsrc_max.num_ldb_ports =
		dlb2->hw_rsrc_query_results.num_ldb_ports;

	handle->info.hw_rsrc_max.num_dir_ports =
		dlb2->hw_rsrc_query_results.num_dir_ports;

	handle->info.hw_rsrc_max.reorder_window_size =
		dlb2->hw_rsrc_query_results.num_hist_list_entries;

	rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));

	return 0;
}

#define DLB2_BASE_10 10

static int
dlb2_string_to_int(int *result, const char *str)
{
	long ret;
	char *endptr;

	if (str == NULL || result == NULL)
		return -EINVAL;

	errno = 0;
	ret = strtol(str, &endptr, DLB2_BASE_10);
	if (errno)
		return -errno;

	/* long and int may have different widths on some architectures */
	if (ret < INT_MIN || ret > INT_MAX || endptr == str)
		return -EINVAL;

	*result = ret;
	return 0;
}
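
/*
 * Illustrative sketch (editor's addition, not part of the driver): expected
 * behavior of dlb2_string_to_int() above. Only base-10 strings whose value
 * fits in an int are accepted; a string with no leading digits fails with
 * -EINVAL, and out-of-range values fail with -ERANGE (via errno) or -EINVAL
 * depending on the width of long.
 */
static void __rte_unused
dlb2_string_to_int_example(void)
{
	int v = 0;

	/* "42" parses to 42 and returns 0 */
	assert(dlb2_string_to_int(&v, "42") == 0 && v == 42);
	/* no digits consumed: endptr == str, so -EINVAL */
	assert(dlb2_string_to_int(&v, "abc") == -EINVAL);
	/* NULL input is rejected up front */
	assert(dlb2_string_to_int(&v, NULL) == -EINVAL);
	RTE_SET_USED(v); /* keep -DNDEBUG builds warning-free */
}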

static int
set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	int ret;

	ret = dlb2_string_to_int(socket_id, value);
	if (ret < 0)
		return ret;

	if (*socket_id > RTE_MAX_NUMA_NODES)
		return -EINVAL;
	return 0;
}

static int
set_max_num_events(const char *key __rte_unused,
		   const char *value,
		   void *opaque)
{
	int *max_num_events = opaque;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(max_num_events, value);
	if (ret < 0)
		return ret;

	if (*max_num_events < 0 || *max_num_events >
			DLB2_MAX_NUM_LDB_CREDITS) {
		DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
			     DLB2_MAX_NUM_LDB_CREDITS);
		return -EINVAL;
	}

	return 0;
}

static int
set_num_dir_credits(const char *key __rte_unused,
		    const char *value,
		    void *opaque)
{
	int *num_dir_credits = opaque;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(num_dir_credits, value);
	if (ret < 0)
		return ret;

	if (*num_dir_credits < 0 ||
	    *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
		DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
			     DLB2_MAX_NUM_DIR_CREDITS);
		return -EINVAL;
	}

	return 0;
}

static int
set_dev_id(const char *key __rte_unused,
	   const char *value,
	   void *opaque)
{
	int *dev_id = opaque;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(dev_id, value);
	if (ret < 0)
		return ret;

	return 0;
}

static int
set_cos(const char *key __rte_unused,
	const char *value,
	void *opaque)
{
	enum dlb2_cos *cos_id = opaque;
	int x = 0;
	int ret;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	ret = dlb2_string_to_int(&x, value);
	if (ret < 0)
		return ret;

	if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
		DLB2_LOG_ERR(
			"COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
			x);
		return -EINVAL;
	}

	*cos_id = x;

	return 0;
}

static int
set_qid_depth_thresh(const char *key __rte_unused,
		     const char *value,
		     void *opaque)
{
	struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
	int first, last, thresh, i;

	if (value == NULL || opaque == NULL) {
		DLB2_LOG_ERR("NULL pointer\n");
		return -EINVAL;
	}

	/* command line override may take one of the following 3 forms:
	 * qid_depth_thresh=all:<threshold_value> ... all queues
	 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
	 * qid_depth_thresh=qid:<threshold_value> ... just one queue
	 */
	if (sscanf(value, "all:%d", &thresh) == 1) {
		first = 0;
		last = DLB2_MAX_NUM_QUEUES - 1;
	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
		/* we have everything we need */
	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
		last = first;
	} else {
		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
		return -EINVAL;
	}

	if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
		return -EINVAL;
	}

	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
		return -EINVAL;
	}

	for (i = first; i <= last; i++)
		qid_thresh->val[i] = thresh; /* indexed by qid */

	return 0;
}
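
/*
 * Illustrative sketch (editor's addition, not part of the driver): how the
 * three accepted qid_depth_thresh forms documented above resolve. The
 * thresholds table is indexed by qid.
 */
static void __rte_unused
dlb2_qid_depth_thresh_example(void)
{
	struct dlb2_qid_depth_thresholds t = {0};

	/* all queues: qids 0 .. DLB2_MAX_NUM_QUEUES - 1 get threshold 64 */
	(void)set_qid_depth_thresh(NULL, "all:64", &t);
	/* a range of queues: qids 2 through 5 get threshold 128 */
	(void)set_qid_depth_thresh(NULL, "2-5:128", &t);
	/* a single queue: qid 7 gets threshold 256 */
	(void)set_qid_depth_thresh(NULL, "7:256", &t);
}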

static void
dlb2_eventdev_info_get(struct rte_eventdev *dev,
		       struct rte_event_dev_info *dev_info)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	int ret;

	ret = dlb2_hw_query_resources(dlb2);
	if (ret) {
		const struct rte_eventdev_data *data = dev->data;

		DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
			     ret, data->dev_id);
		/* fn is void, so fall through and return values set up in
		 * probe
		 */
	}

	/* Add the number of resources currently owned by this domain.
	 * These would become available if the scheduling domain were reset
	 * due to the application calling eventdev_configure again to
	 * *reconfigure* the domain.
	 */
	evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
	evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
	evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;

	evdev_dlb2_default_info.max_event_queues =
		RTE_MIN(evdev_dlb2_default_info.max_event_queues,
			RTE_EVENT_MAX_QUEUES_PER_DEV);

	evdev_dlb2_default_info.max_num_events =
		RTE_MIN(evdev_dlb2_default_info.max_num_events,
			dlb2->max_num_events_override);

	*dev_info = evdev_dlb2_default_info;
}
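
/*
 * Illustrative sketch (editor's addition, not part of the driver): an
 * application reads the limits published above through the standard
 * eventdev API. Assumes dev_id identifies a probed DLB2 instance.
 */
static int __rte_unused
dlb2_info_get_example(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	int ret = rte_event_dev_info_get(dev_id, &info);

	if (ret == 0)
		printf("%s: max queues=%u ports=%u inflight events=%d\n",
		       info.driver_name, info.max_event_queues,
		       info.max_event_ports, info.max_num_events);
	return ret;
}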

static int
dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
			    const struct dlb2_hw_rsrcs *resources_asked)
{
	int ret = 0;
	struct dlb2_create_sched_domain_args *cfg;

	if (resources_asked == NULL) {
		DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
		/* negative errno, so the caller's ret < 0 check fires */
		ret = -EINVAL;
		goto error_exit;
	}

	/* Map generic qm resources to dlb2 resources */
	cfg = &handle->cfg.resources;

	/* DIR ports and queues */

	cfg->num_dir_ports = resources_asked->num_dir_ports;

	cfg->num_dir_credits = resources_asked->num_dir_credits;

	/* LDB queues */

	cfg->num_ldb_queues = resources_asked->num_ldb_queues;

	/* LDB ports */

	cfg->cos_strict = 0; /* Best effort */
	cfg->num_cos_ldb_ports[0] = 0;
	cfg->num_cos_ldb_ports[1] = 0;
	cfg->num_cos_ldb_ports[2] = 0;
	cfg->num_cos_ldb_ports[3] = 0;

	switch (handle->cos_id) {
	case DLB2_COS_0:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[0] = resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_1:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_2:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_3:
		cfg->num_ldb_ports = 0; /* no don't care ports */
		cfg->num_cos_ldb_ports[3] = resources_asked->num_ldb_ports;
		break;
	case DLB2_COS_DEFAULT:
		/* all ldb ports are don't care ports from a cos perspective */
		cfg->num_ldb_ports = resources_asked->num_ldb_ports;
		break;
	}

	cfg->num_ldb_credits = resources_asked->num_ldb_credits;

	cfg->num_atomic_inflights =
		DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
		cfg->num_ldb_queues;

	cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
		DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

	DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
		     cfg->num_ldb_queues,
		     resources_asked->num_ldb_ports,
		     cfg->num_dir_ports,
		     cfg->num_atomic_inflights,
		     cfg->num_hist_list_entries,
		     cfg->num_ldb_credits,
		     cfg->num_dir_credits);

	/* Configure the QM */

	ret = dlb2_iface_sched_domain_create(handle, cfg);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
			     ret,
			     dlb2_error_strings[cfg->response.status]);

		goto error_exit;
	}

	handle->domain_id = cfg->response.id;
	handle->cfg.configured = true;

error_exit:

	return ret;
}

static void
dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	enum dlb2_configuration_state config_state;
	int i, j;

	dlb2_iface_domain_reset(dlb2);

	/* Free all dynamically allocated port memory */
	for (i = 0; i < dlb2->num_ports; i++)
		dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);

	/* If reconfiguring, mark the device's queues and ports as "previously
	 * configured." If the user doesn't reconfigure them, the PMD will
	 * reapply their previous configuration when the device is started.
	 */
	config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
		DLB2_NOT_CONFIGURED;

	for (i = 0; i < dlb2->num_ports; i++) {
		dlb2->ev_ports[i].qm_port.config_state = config_state;
		/* Reset setup_done so ports can be reconfigured */
		dlb2->ev_ports[i].setup_done = false;
		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
			dlb2->ev_ports[i].link[j].mapped = false;
	}

	for (i = 0; i < dlb2->num_queues; i++)
		dlb2->ev_queues[i].qm_queue.config_state = config_state;

	for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
		dlb2->ev_queues[i].setup_done = false;

	dlb2->num_ports = 0;
	dlb2->num_ldb_ports = 0;
	dlb2->num_dir_ports = 0;
	dlb2->num_queues = 0;
	dlb2->num_ldb_queues = 0;
	dlb2->num_dir_queues = 0;
	dlb2->configured = false;
}

/* Note: 1 QM instance per QM device, QM instance/device == event device */
static int
dlb2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
	struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *config = &data->dev_conf;
	int ret;

	/* If this eventdev is already configured, we must release the current
	 * scheduling domain before attempting to configure a new one.
	 */
	if (dlb2->configured) {
		dlb2_hw_reset_sched_domain(dev, true);

		ret = dlb2_hw_query_resources(dlb2);
		if (ret) {
			DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
				     ret, data->dev_id);
			return ret;
		}
	}

	if (config->nb_event_queues > rsrcs->num_queues) {
		DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
			     config->nb_event_queues,
			     rsrcs->num_queues);
		return -EINVAL;
	}
	if (config->nb_event_ports > (rsrcs->num_ldb_ports
			+ rsrcs->num_dir_ports)) {
		DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
			     config->nb_event_ports,
			     (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
		return -EINVAL;
	}
	if (config->nb_events_limit > rsrcs->nb_events_limit) {
		DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
			     config->nb_events_limit,
			     rsrcs->nb_events_limit);
		return -EINVAL;
	}

	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dlb2->global_dequeue_wait = false;
	else {
		uint32_t timeout32;

		dlb2->global_dequeue_wait = true;

		/* note size mismatch of timeout vals in eventdev lib. */
		timeout32 = config->dequeue_timeout_ns;

		dlb2->global_dequeue_wait_ticks =
			timeout32 * (rte_get_timer_hz() / 1E9);
	}

	/* Does this platform support umonitor/umwait? */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
		if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
		    RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
			DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
				     RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
			return -EINVAL;
		}
		dlb2->umwait_allowed = true;
	}

	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
	rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
	/* 1 dir queue per dir port */
	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;

	/* Scale down nb_events_limit by 4 for directed credits, since there
	 * are 4x as many load-balanced credits.
	 */
	rsrcs->num_ldb_credits = 0;
	rsrcs->num_dir_credits = 0;

	if (rsrcs->num_ldb_queues)
		rsrcs->num_ldb_credits = config->nb_events_limit;
	if (rsrcs->num_dir_ports)
		rsrcs->num_dir_credits = config->nb_events_limit / 4;
	if (dlb2->num_dir_credits_override != -1)
		rsrcs->num_dir_credits = dlb2->num_dir_credits_override;

	if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
		DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
		return -ENODEV;
	}

	dlb2->new_event_limit = config->nb_events_limit;
	__atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);

	/* Save number of ports/queues for this event dev */
	dlb2->num_ports = config->nb_event_ports;
	dlb2->num_queues = config->nb_event_queues;
	dlb2->num_dir_ports = rsrcs->num_dir_ports;
	dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
	dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
	dlb2->num_dir_queues = dlb2->num_dir_ports;
	dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
	dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
	dlb2->dir_credit_pool = rsrcs->num_dir_credits;
	dlb2->max_dir_credits = rsrcs->num_dir_credits;

	dlb2->configured = true;

	return 0;
}
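
/*
 * Illustrative sketch (editor's addition, not part of the driver): a minimal
 * application-side configuration against the limits reported by info_get.
 * Per the logic above, nb_single_link_event_port_queues becomes the number
 * of directed ports/queues, and directed credits default to
 * nb_events_limit / 4 unless overridden by the num_dir_credits devarg.
 */
static int __rte_unused
dlb2_configure_example(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret != 0)
		return ret;

	cfg.nb_event_queues = 4;
	cfg.nb_event_ports = 4;
	cfg.nb_single_link_event_port_queues = 1; /* one directed port+queue */
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns; /* global wait */

	return rte_event_dev_configure(dev_id, &cfg);
}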

static void
dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
				    uint8_t port_id,
				    struct rte_event_port_conf *port_conf)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);

	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = dlb2->new_event_limit;
	port_conf->dequeue_depth = 32;
	port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
	port_conf->event_port_cfg = 0;
}

static void
dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
				     uint8_t queue_id,
				     struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = 1024;
	queue_conf->nb_atomic_order_sequences = 64;
	queue_conf->event_queue_cfg = 0;
	queue_conf->priority = 0;
}
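
/*
 * Illustrative sketch (editor's addition, not part of the driver): how the
 * defaults above reach an application through the public API. Note that
 * this patch does not yet wire up queue_setup/port_setup in the ops table,
 * so only the default-conf path exists at this point in the series.
 */
static void __rte_unused
dlb2_default_conf_example(uint8_t dev_id)
{
	struct rte_event_queue_conf qconf;
	struct rte_event_port_conf pconf;

	/* fills in nb_atomic_flows=1024, nb_atomic_order_sequences=64, ... */
	(void)rte_event_queue_default_conf_get(dev_id, 0, &qconf);
	/* fills in dequeue_depth=32, enqueue_depth=DLB2_MAX_ENQUEUE_DEPTH */
	(void)rte_event_port_default_conf_get(dev_id, 0, &pconf);
}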

static void
dlb2_entry_points_init(struct rte_eventdev *dev)
{
	/* Expose PMD's eventdev interface */
	static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
		.dev_infos_get    = dlb2_eventdev_info_get,
		.dev_configure    = dlb2_eventdev_configure,
		.queue_def_conf   = dlb2_eventdev_queue_default_conf_get,
		.port_def_conf    = dlb2_eventdev_port_default_conf_get,
		.dump             = dlb2_eventdev_dump,
		.xstats_get       = dlb2_eventdev_xstats_get,
		.xstats_get_names = dlb2_eventdev_xstats_get_names,
		.xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
		.xstats_reset       = dlb2_eventdev_xstats_reset,
	};

	dev->dev_ops = &dlb2_eventdev_entry_ops;
}

int
dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
			    const char *name,
			    struct dlb2_devargs *dlb2_args)
{
	struct dlb2_eventdev *dlb2;
	int err;

	dlb2 = dev->data->dev_private;

	dlb2->event_dev = dev; /* backlink */

	evdev_dlb2_default_info.driver_name = name;

	dlb2->max_num_events_override = dlb2_args->max_num_events;
	dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
	dlb2->qm_instance.cos_id = dlb2_args->cos_id;

	err = dlb2_iface_open(&dlb2->qm_instance, name);
	if (err < 0) {
		DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
			     err);
		return err;
	}

	err = dlb2_iface_get_device_version(&dlb2->qm_instance,
					    &dlb2->revision);
	if (err < 0) {
		DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
			     err);
		return err;
	}

	err = dlb2_hw_query_resources(dlb2);
	if (err) {
		DLB2_LOG_ERR("get resources err=%d for %s\n",
			     err, name);
		return err;
	}

	dlb2_iface_hardware_init(&dlb2->qm_instance);

	err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
	if (err < 0) {
		DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
			     err);
		return err;
	}

	/* Complete xstats runtime initialization */
	err = dlb2_xstats_init(dlb2);
	if (err) {
		DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
		return err;
	}

	rte_spinlock_init(&dlb2->qm_instance.resource_lock);

	dlb2_iface_low_level_io_init();

	dlb2_entry_points_init(dev);

	dlb2_init_queue_depth_thresholds(dlb2,
					 dlb2_args->qid_depth_thresholds.val);

	return 0;
}

int
dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
			      const char *name)
{
	struct dlb2_eventdev *dlb2;
	int err;

	dlb2 = dev->data->dev_private;

	evdev_dlb2_default_info.driver_name = name;

	err = dlb2_iface_open(&dlb2->qm_instance, name);
	if (err < 0) {
		DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
			     err);
		return err;
	}

	err = dlb2_hw_query_resources(dlb2);
	if (err) {
		DLB2_LOG_ERR("get resources err=%d for %s\n",
			     err, name);
		return err;
	}

	dlb2_iface_low_level_io_init();

	dlb2_entry_points_init(dev);

	return 0;
}

int
dlb2_parse_params(const char *params,
		  const char *name,
		  struct dlb2_devargs *dlb2_args)
{
	int ret = 0;
	static const char * const args[] = { NUMA_NODE_ARG,
					     DLB2_MAX_NUM_EVENTS,
					     DLB2_NUM_DIR_CREDITS,
					     DEV_ID_ARG,
					     DLB2_QID_DEPTH_THRESH_ARG,
					     DLB2_COS_ARG,
					     NULL };

	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (kvlist == NULL) {
			RTE_LOG(INFO, PMD,
				"Ignoring unsupported parameters when creating device '%s'\n",
				name);
		} else {
			/* assign the function-scope ret; a local declaration
			 * here would shadow it
			 */
			ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
						 set_numa_node,
						 &dlb2_args->socket_id);
			if (ret != 0) {
				DLB2_LOG_ERR("%s: Error parsing numa node parameter",
					     name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
						 set_max_num_events,
						 &dlb2_args->max_num_events);
			if (ret != 0) {
				DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
					     name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist,
					DLB2_NUM_DIR_CREDITS,
					set_num_dir_credits,
					&dlb2_args->num_dir_credits_override);
			if (ret != 0) {
				DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
					     name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
						 set_dev_id,
						 &dlb2_args->dev_id);
			if (ret != 0) {
				DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
					     name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(
					kvlist,
					DLB2_QID_DEPTH_THRESH_ARG,
					set_qid_depth_thresh,
					&dlb2_args->qid_depth_thresholds);
			if (ret != 0) {
				DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
					     name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
						 set_cos,
						 &dlb2_args->cos_id);
			if (ret != 0) {
				DLB2_LOG_ERR("%s: Error parsing cos parameter",
					     name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}
	return ret;
}
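
/*
 * Illustrative sketch (editor's addition, not part of the driver): parsing a
 * devargs string by hand. The keys come from the args[] table above, using
 * string-literal concatenation (assumes the *_ARG macros expand to string
 * literals, as kvargs keys do). "event_dlb2" is a hypothetical device name;
 * in normal use the bus layer extracts the params string from the device's
 * devargs.
 */
static int __rte_unused
dlb2_parse_params_example(void)
{
	static const char params[] =
		DLB2_MAX_NUM_EVENTS "=1024,"
		DLB2_NUM_DIR_CREDITS "=512,"
		DLB2_COS_ARG "=2";
	struct dlb2_devargs args = {0};

	/* on success, args.max_num_events == 1024, and so on */
	return dlb2_parse_params(params, "event_dlb2", &args);
}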
RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);