event/dlb2: support ldb port specific COS
[dpdk.git] / drivers / event / dlb2 / pf / dlb2_pf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <sys/mman.h>
9 #include <fcntl.h>
10 #include <sys/time.h>
11 #include <errno.h>
12 #include <assert.h>
13 #include <unistd.h>
14 #include <string.h>
15
16 #include <rte_debug.h>
17 #include <rte_log.h>
18 #include <rte_dev.h>
19 #include <rte_devargs.h>
20 #include <rte_mbuf.h>
21 #include <rte_ring.h>
22 #include <rte_errno.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
25 #include <rte_cycles.h>
26 #include <rte_io.h>
27 #include <rte_pci.h>
28 #include <rte_bus_pci.h>
29 #include <rte_eventdev.h>
30 #include <eventdev_pmd.h>
31 #include <eventdev_pmd_pci.h>
32 #include <rte_memory.h>
33 #include <rte_string_fns.h>
34
35 #include "../dlb2_priv.h"
36 #include "../dlb2_iface.h"
37 #include "../dlb2_inline_fns.h"
38 #include "dlb2_main.h"
39 #include "base/dlb2_hw_types.h"
40 #include "base/dlb2_osdep.h"
41 #include "base/dlb2_resource.h"
42
43 static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
44 static unsigned int dlb2_qe_sa_pct = 1;
45 static unsigned int dlb2_qid_sa_pct;
46
47 static void
48 dlb2_pf_low_level_io_init(void)
49 {
50         int i;
51         /* Addresses will be initialized at port create */
52         for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
53                 /* First directed ports */
54                 dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
55                 dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
56                 dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
57
58                 /* Now load balanced ports */
59                 dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
60                 dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
61                 dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
62         }
63 }
64
static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	/* The PF interface needs no open step; only silence the
	 * unused-parameter warnings and report success.
	 */
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}
73
74 static int
75 dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
76                            uint8_t *revision)
77 {
78         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
79
80         *revision = dlb2_dev->revision;
81
82         return 0;
83 }
84
85 static void dlb2_pf_calc_arbiter_weights(u8 *weight,
86                                          unsigned int pct)
87 {
88         int val, i;
89
90         /* Largest possible weight (100% SA case): 32 */
91         val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS;
92
93         /* Scale val according to the starvation avoidance percentage */
94         val = (val * pct) / 100;
95         if (val == 0 && pct != 0)
96                 val = 1;
97
98         /* Prio 7 always has weight 0xff */
99         weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT;
100
101         for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--)
102                 weight[i] = weight[i + 1] - val;
103 }
104
105
106 static void
107 dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
108 {
109         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
110
111         dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
112         dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
113
114         /* Configure arbitration weights for QE selection */
115         if (dlb2_qe_sa_pct <= 100) {
116                 u8 weight[DLB2_NUM_ARB_WEIGHTS];
117
118                 dlb2_pf_calc_arbiter_weights(weight,
119                                              dlb2_qe_sa_pct);
120
121                 dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight);
122         }
123
124         /* Configure arbitration weights for QID selection */
125         if (dlb2_qid_sa_pct <= 100) {
126                 u8 weight[DLB2_NUM_ARB_WEIGHTS];
127
128                 dlb2_pf_calc_arbiter_weights(weight,
129                                              dlb2_qid_sa_pct);
130
131                 dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight);
132         }
133
134 }
135
136 static int
137 dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
138                           struct dlb2_get_num_resources_args *rsrcs)
139 {
140         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
141
142         return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
143 }
144
145 static int
146 dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
147                          enum dlb2_cq_poll_modes *mode)
148 {
149         RTE_SET_USED(handle);
150
151         *mode = DLB2_CQ_POLL_MODE_SPARSE;
152
153         return 0;
154 }
155
156 static int
157 dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
158                             struct dlb2_create_sched_domain_args *arg)
159 {
160         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
161         struct dlb2_cmd_response response = {0};
162         int ret;
163
164         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
165
166         if (dlb2_dev->domain_reset_failed) {
167                 response.status = DLB2_ST_DOMAIN_RESET_FAILED;
168                 ret = -EINVAL;
169                 goto done;
170         }
171
172         ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
173         if (ret)
174                 goto done;
175
176 done:
177
178         arg->response = response;
179
180         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
181                   __func__, ret);
182
183         return ret;
184 }
185
186 static void
187 dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
188 {
189         struct dlb2_dev *dlb2_dev;
190         int ret;
191
192         dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
193         ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
194         if (ret)
195                 DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
196 }
197
198 static int
199 dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
200                          struct dlb2_create_ldb_queue_args *cfg)
201 {
202         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
203         struct dlb2_cmd_response response = {0};
204         int ret;
205
206         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
207
208         ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
209                                        handle->domain_id,
210                                        cfg,
211                                        &response);
212
213         cfg->response = response;
214
215         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
216                   __func__, ret);
217
218         return ret;
219 }
220
221 static int
222 dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
223                          struct dlb2_get_sn_occupancy_args *args)
224 {
225         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
226         struct dlb2_cmd_response response = {0};
227         int ret;
228
229         ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
230                                                        args->group);
231
232         response.id = ret;
233         response.status = 0;
234
235         args->response = response;
236
237         return ret;
238 }
239
240 static int
241 dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
242                           struct dlb2_get_sn_allocation_args *args)
243 {
244         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
245         struct dlb2_cmd_response response = {0};
246         int ret;
247
248         ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);
249
250         response.id = ret;
251         response.status = 0;
252
253         args->response = response;
254
255         return ret;
256 }
257
258 static int
259 dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
260                           struct dlb2_set_sn_allocation_args *args)
261 {
262         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
263         struct dlb2_cmd_response response = {0};
264         int ret;
265
266         ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
267                                               args->num);
268
269         response.status = 0;
270
271         args->response = response;
272
273         return ret;
274 }
275
276 static void *
277 dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
278                             size_t size, int align)
279 {
280         char mz_name[RTE_MEMZONE_NAMESIZE];
281         uint32_t core_id = rte_lcore_id();
282         unsigned int socket_id;
283
284         snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
285                  (unsigned long)rte_get_timer_cycles());
286         if (core_id == (unsigned int)LCORE_ID_ANY)
287                 core_id = rte_get_main_lcore();
288         socket_id = rte_lcore_to_socket_id(core_id);
289         *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
290                                          RTE_MEMZONE_IOVA_CONTIG, align);
291         if (*mz == NULL) {
292                 DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
293                              size, rte_strerror(rte_errno));
294                 *phys = 0;
295                 return NULL;
296         }
297         *phys = (*mz)->iova;
298         return (*mz)->addr;
299 }
300
/* Create a load-balanced port: allocate and pin its CQ DMA memory,
 * create the port in hardware, then record the producer-port (PP) MMIO
 * address and CQ base in the global dlb2_port[] table.
 *
 * Returns 0 on success; on failure frees the memzone and returns a
 * negative errno. cfg->response carries the hardware status and port id.
 */
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = false;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	/* Sparse poll mode consumes a full cache line per QE */
	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	/* Page-aligned, IOVA-contiguous region for the CQ */
	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	/* Create the port in hardware; response.id is the new port id */
	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	/* PP MMIO window: one page per port starting at the LDB PP base */
	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	/* Keep the memzone handle so the port's memory can be freed later */
	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);
	return ret;
}
376
377 static int
378 dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
379                         struct dlb2_create_dir_port_args *cfg,
380                         enum dlb2_cq_poll_modes poll_mode)
381 {
382         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
383         struct dlb2_cmd_response response = {0};
384         struct dlb2_port_memory port_memory;
385         int ret;
386         uint8_t *port_base;
387         const struct rte_memzone *mz;
388         int alloc_sz, qe_sz;
389         phys_addr_t cq_base;
390         phys_addr_t pp_base;
391         int is_dir = true;
392
393         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
394
395         if (poll_mode == DLB2_CQ_POLL_MODE_STD)
396                 qe_sz = sizeof(struct dlb2_dequeue_qe);
397         else
398                 qe_sz = RTE_CACHE_LINE_SIZE;
399
400         /* Calculate the port memory required, and round up to the nearest
401          * cache line.
402          */
403         alloc_sz = cfg->cq_depth * qe_sz;
404         alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
405
406         port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
407                                                 rte_mem_page_size());
408         if (port_base == NULL)
409                 return -ENOMEM;
410
411         /* Lock the page in memory */
412         ret = rte_mem_lock_page(port_base);
413         if (ret < 0) {
414                 DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
415                 goto create_port_err;
416         }
417
418         memset(port_base, 0, alloc_sz);
419
420         ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
421                                       handle->domain_id,
422                                       cfg,
423                                       cq_base,
424                                       &response);
425         if (ret)
426                 goto create_port_err;
427
428         pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
429         dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
430                 (void *)(pp_base + (rte_mem_page_size() * response.id));
431
432         dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
433                 (void *)(port_base);
434         memset(&port_memory, 0, sizeof(port_memory));
435
436         dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;
437
438         dlb2_list_init_head(&port_memory.list);
439
440         cfg->response = response;
441
442         return 0;
443
444 create_port_err:
445
446         rte_memzone_free(mz);
447
448         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
449                   __func__, ret);
450
451         return ret;
452 }
453
454 static int
455 dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
456                          struct dlb2_create_dir_queue_args *cfg)
457 {
458         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
459         struct dlb2_cmd_response response = {0};
460         int ret;
461
462         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
463
464         ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
465                                        handle->domain_id,
466                                        cfg,
467                                        &response);
468
469         cfg->response = response;
470
471         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
472                   __func__, ret);
473
474         return ret;
475 }
476
477 static int
478 dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
479                 struct dlb2_map_qid_args *cfg)
480 {
481         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
482         struct dlb2_cmd_response response = {0};
483         int ret;
484
485         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
486
487         ret = dlb2_hw_map_qid(&dlb2_dev->hw,
488                               handle->domain_id,
489                               cfg,
490                               &response,
491                               false,
492                               0);
493
494         cfg->response = response;
495
496         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
497                   __func__, ret);
498
499         return ret;
500 }
501
502 static int
503 dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
504                   struct dlb2_unmap_qid_args *cfg)
505 {
506         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
507         struct dlb2_cmd_response response = {0};
508         int ret;
509
510         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
511
512         ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
513                                 handle->domain_id,
514                                 cfg,
515                                 &response,
516                                 false,
517                                 0);
518
519         cfg->response = response;
520
521         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
522                   __func__, ret);
523
524         return ret;
525 }
526
527 static int
528 dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
529                             struct dlb2_pending_port_unmaps_args *args)
530 {
531         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
532         struct dlb2_cmd_response response = {0};
533         int ret;
534
535         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
536
537         ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
538                                           handle->domain_id,
539                                           args,
540                                           &response,
541                                           false,
542                                           0);
543
544         args->response = response;
545
546         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
547                   __func__, ret);
548
549         return ret;
550 }
551
552 static int
553 dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
554                            struct dlb2_start_domain_args *cfg)
555 {
556         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
557         struct dlb2_cmd_response response = {0};
558         int ret;
559
560         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
561
562         ret = dlb2_pf_start_domain(&dlb2_dev->hw,
563                                    handle->domain_id,
564                                    cfg,
565                                    &response);
566
567         cfg->response = response;
568
569         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
570                   __func__, ret);
571
572         return ret;
573 }
574
575 static int
576 dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
577                             struct dlb2_get_ldb_queue_depth_args *args)
578 {
579         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
580         struct dlb2_cmd_response response = {0};
581         int ret;
582
583         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
584
585         ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
586                                           handle->domain_id,
587                                           args,
588                                           &response,
589                                           false,
590                                           0);
591
592         args->response = response;
593
594         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
595                   __func__, ret);
596
597         return ret;
598 }
599
600 static int
601 dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
602                             struct dlb2_get_dir_queue_depth_args *args)
603 {
604         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
605         struct dlb2_cmd_response response = {0};
606         int ret = 0;
607
608         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
609
610         ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
611                                           handle->domain_id,
612                                           args,
613                                           &response,
614                                           false,
615                                           0);
616
617         args->response = response;
618
619         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
620                   __func__, ret);
621
622         return ret;
623 }
624
625 static int
626 dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle,
627                          struct dlb2_enable_cq_weight_args *args)
628 {
629         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
630         struct dlb2_cmd_response response = {0};
631         int ret = 0;
632
633         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
634
635         ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw,
636                                        handle->domain_id,
637                                        args,
638                                        &response,
639                                        false,
640                                        0);
641         args->response = response;
642
643         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
644                   __func__, ret);
645
646         return ret;
647 }
648
649 static int
650 dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle,
651                           struct dlb2_set_cos_bw_args *args)
652 {
653         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
654         int ret = 0;
655
656         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
657
658         ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw,
659                                         args->cos_id,
660                                         args->bandwidth);
661
662         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
663                   __func__, ret);
664
665         return ret;
666 }
667
/* Wire the PF implementations into the dlb2_iface_* function pointers
 * shared with the common PMD code (../dlb2_iface.h). Called once during
 * PCI probe, before any of the pointers are dereferenced.
 */
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	/* Device lifecycle and capability queries */
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	/* Domain, queue, and port creation */
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	/* QID linking and runtime queries */
	dlb2_iface_map_qid = dlb2_pf_map_qid;
	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
	/* Sequence numbers, CQ weight, and class-of-service bandwidth */
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
	dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight;
	dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth;
}
695
696 /* PCI DEV HOOKS */
/* PCI DEV HOOKS */
/* Eventdev init hook invoked by rte_event_pmd_pci_probe_named().
 *
 * In the primary process: probes the DLB2 PF layer, parses any devargs,
 * and performs the full eventdev probe. In a secondary process: only
 * attaches to the already-probed device. Returns 0 on success or a
 * negative errno.
 */
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	/* Defaults used when no devargs override them */
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
		.sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
		.hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ,
		.default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT,
		.max_cq_depth = DLB2_DEFAULT_CQ_DEPTH
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	/* Install the PF implementations of the iface function pointers */
	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
		/* Distinguish DLB2 v2.0 from v2.5 by PCI device id */
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args,
						dlb2->version);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		/* Secondary process: attach to the existing device */
		dlb2 = dlb2_pmd_priv(eventdev);
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}
771
/* PCI vendor id shared by all Intel DLB2 devices */
#define EVENTDEV_INTEL_VENDOR_ID 0x8086

/* PCI device ids matched by the DLB2 (v2.0) PF driver */
static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0, /* sentinel: end of table */
	},
};

/* PCI device ids matched by the DLB2.5 PF driver */
static const struct rte_pci_id pci_id_dlb2_5_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
	},
	{
		.vendor_id = 0, /* sentinel: end of table */
	},
};
793
794 static int
795 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
796                      struct rte_pci_device *pci_dev)
797 {
798         int ret;
799
800         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
801                                              sizeof(struct dlb2_eventdev),
802                                              dlb2_eventdev_pci_init,
803                                              event_dlb2_pf_name);
804         if (ret) {
805                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
806                                 "ret=%d\n", ret);
807         }
808
809         return ret;
810 }
811
812 static int
813 event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
814 {
815         int ret;
816
817         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
818
819         if (ret) {
820                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
821                                 "ret=%d\n", ret);
822         }
823
824         return ret;
825
826 }
827
828 static int
829 event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
830                        struct rte_pci_device *pci_dev)
831 {
832         int ret;
833
834         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
835                                             sizeof(struct dlb2_eventdev),
836                                             dlb2_eventdev_pci_init,
837                                             event_dlb2_pf_name);
838         if (ret) {
839                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
840                                 "ret=%d\n", ret);
841         }
842
843         return ret;
844 }
845
846 static int
847 event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
848 {
849         int ret;
850
851         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
852
853         if (ret) {
854                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
855                                 "ret=%d\n", ret);
856         }
857
858         return ret;
859
860 }
861
/* PCI driver definition for DLB2 v2.0 PF devices */
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING, /* map BARs before probe */
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

/* PCI driver definition for DLB2.5 PF devices */
static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
	.id_table = pci_id_dlb2_5_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING, /* map BARs before probe */
	.probe = event_dlb2_5_pci_probe,
	.remove = event_dlb2_5_pci_remove,
};

/* Register both drivers and their id tables with the PCI bus */
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);

RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);