event/dlb2: add v2.5 queue depth functions
[dpdk.git] / drivers / event / dlb2 / pf / dlb2_pf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <sys/mman.h>
9 #include <fcntl.h>
10 #include <sys/time.h>
11 #include <errno.h>
12 #include <assert.h>
13 #include <unistd.h>
14 #include <string.h>
15
16 #include <rte_debug.h>
17 #include <rte_log.h>
18 #include <rte_dev.h>
19 #include <rte_devargs.h>
20 #include <rte_mbuf.h>
21 #include <rte_ring.h>
22 #include <rte_errno.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
25 #include <rte_cycles.h>
26 #include <rte_io.h>
27 #include <rte_pci.h>
28 #include <rte_bus_pci.h>
29 #include <rte_eventdev.h>
30 #include <eventdev_pmd.h>
31 #include <eventdev_pmd_pci.h>
32 #include <rte_memory.h>
33 #include <rte_string_fns.h>
34
35 #define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */
36
37 #include "../dlb2_priv.h"
38 #include "../dlb2_iface.h"
39 #include "../dlb2_inline_fns.h"
40 #include "dlb2_main.h"
41 #include "base/dlb2_hw_types_new.h"
42 #include "base/dlb2_osdep.h"
43 #include "base/dlb2_resource_new.h"
44
/* PMD name used for both the v2.0 and v2.5 PF eventdev registrations below. */
static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
46
47 static void
48 dlb2_pf_low_level_io_init(void)
49 {
50         int i;
51         /* Addresses will be initialized at port create */
52         for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
53                 /* First directed ports */
54                 dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
55                 dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
56                 dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
57
58                 /* Now load balanced ports */
59                 dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
60                 dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
61                 dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
62         }
63 }
64
/* PF "open" hook: nothing to open for the PF PMD, so this always succeeds.
 * Both parameters are intentionally unused.
 */
static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	RTE_SET_USED(name);
	RTE_SET_USED(handle);
	return 0;
}
73
74 static int
75 dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
76                            uint8_t *revision)
77 {
78         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
79
80         *revision = dlb2_dev->revision;
81
82         return 0;
83 }
84
85 static void
86 dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
87 {
88         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
89
90         dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
91         dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
92 }
93
94 static int
95 dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
96                           struct dlb2_get_num_resources_args *rsrcs)
97 {
98         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
99
100         return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
101 }
102
103 static int
104 dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
105                          enum dlb2_cq_poll_modes *mode)
106 {
107         RTE_SET_USED(handle);
108
109         *mode = DLB2_CQ_POLL_MODE_SPARSE;
110
111         return 0;
112 }
113
114 static int
115 dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
116                             struct dlb2_create_sched_domain_args *arg)
117 {
118         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
119         struct dlb2_cmd_response response = {0};
120         int ret;
121
122         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
123
124         if (dlb2_dev->domain_reset_failed) {
125                 response.status = DLB2_ST_DOMAIN_RESET_FAILED;
126                 ret = -EINVAL;
127                 goto done;
128         }
129
130         ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
131         if (ret)
132                 goto done;
133
134 done:
135
136         arg->response = response;
137
138         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
139                   __func__, ret);
140
141         return ret;
142 }
143
144 static void
145 dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
146 {
147         struct dlb2_dev *dlb2_dev;
148         int ret;
149
150         dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
151         ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
152         if (ret)
153                 DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
154 }
155
156 static int
157 dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
158                          struct dlb2_create_ldb_queue_args *cfg)
159 {
160         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
161         struct dlb2_cmd_response response = {0};
162         int ret;
163
164         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
165
166         ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
167                                        handle->domain_id,
168                                        cfg,
169                                        &response);
170
171         cfg->response = response;
172
173         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
174                   __func__, ret);
175
176         return ret;
177 }
178
179 static int
180 dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
181                          struct dlb2_get_sn_occupancy_args *args)
182 {
183         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
184         struct dlb2_cmd_response response = {0};
185         int ret;
186
187         ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
188                                                        args->group);
189
190         response.id = ret;
191         response.status = 0;
192
193         args->response = response;
194
195         return ret;
196 }
197
198 static int
199 dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
200                           struct dlb2_get_sn_allocation_args *args)
201 {
202         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
203         struct dlb2_cmd_response response = {0};
204         int ret;
205
206         ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);
207
208         response.id = ret;
209         response.status = 0;
210
211         args->response = response;
212
213         return ret;
214 }
215
216 static int
217 dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
218                           struct dlb2_set_sn_allocation_args *args)
219 {
220         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
221         struct dlb2_cmd_response response = {0};
222         int ret;
223
224         ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
225                                               args->num);
226
227         response.status = 0;
228
229         args->response = response;
230
231         return ret;
232 }
233
234 static void *
235 dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
236                             size_t size, int align)
237 {
238         char mz_name[RTE_MEMZONE_NAMESIZE];
239         uint32_t core_id = rte_lcore_id();
240         unsigned int socket_id;
241
242         snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
243                  (unsigned long)rte_get_timer_cycles());
244         if (core_id == (unsigned int)LCORE_ID_ANY)
245                 core_id = rte_get_main_lcore();
246         socket_id = rte_lcore_to_socket_id(core_id);
247         *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
248                                          RTE_MEMZONE_IOVA_CONTIG, align);
249         if (*mz == NULL) {
250                 DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
251                              size, rte_strerror(rte_errno));
252                 *phys = 0;
253                 return NULL;
254         }
255         *phys = (*mz)->iova;
256         return (*mz)->addr;
257 }
258
/* Create a load-balanced port: allocate and pin DMA memory for its CQ,
 * create the port in hardware, then record the producer-port MMIO address,
 * CQ base, and backing memzone in the process-local dlb2_port table.
 * Returns 0 on success, negative errno-style value on failure; the
 * memzone is freed on every error path.
 */
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = false;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	/* Standard mode packs QEs back to back; sparse mode (the PF PMD
	 * default) uses one cache line per QE.
	 */
	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line. The depth is clamped to the hardware minimum.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	/* Page-aligned, IOVA-contiguous memory for the CQ ring. */
	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	/* Producer-port MMIO window: one page per port, indexed by the
	 * hardware-assigned port id in response.id.
	 */
	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	/* Keep the memzone so it can be freed at teardown. */
	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);
	return ret;
}
334
/* Create a directed port: allocate and pin DMA memory for its CQ, create
 * the port in hardware, then record the producer-port MMIO address, CQ
 * base, and backing memzone in the process-local dlb2_port table.
 * Returns 0 on success, negative errno-style value on failure; the
 * memzone is freed on every error path.
 */
static int
dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_dir_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = true;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	/* Standard mode packs QEs back to back; sparse mode (the PF PMD
	 * default) uses one cache line per QE.
	 */
	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 * NOTE(review): unlike dlb2_pf_ldb_port_create(), the depth is not
	 * clamped to DLB2_MIN_HARDWARE_CQ_DEPTH here — confirm directed CQ
	 * depths cannot fall below the hardware minimum.
	 */
	alloc_sz = cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	/* Page-aligned, IOVA-contiguous memory for the CQ ring. */
	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	/* Producer-port MMIO window: one page per port, indexed by the
	 * hardware-assigned port id in response.id.
	 */
	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
		(void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	/* Keep the memzone so it can be freed at teardown. */
	dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}
411
412 static int
413 dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
414                          struct dlb2_create_dir_queue_args *cfg)
415 {
416         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
417         struct dlb2_cmd_response response = {0};
418         int ret;
419
420         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
421
422         ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
423                                        handle->domain_id,
424                                        cfg,
425                                        &response);
426
427         cfg->response = response;
428
429         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
430                   __func__, ret);
431
432         return ret;
433 }
434
435 static int
436 dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
437                 struct dlb2_map_qid_args *cfg)
438 {
439         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
440         struct dlb2_cmd_response response = {0};
441         int ret;
442
443         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
444
445         ret = dlb2_hw_map_qid(&dlb2_dev->hw,
446                               handle->domain_id,
447                               cfg,
448                               &response,
449                               false,
450                               0);
451
452         cfg->response = response;
453
454         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
455                   __func__, ret);
456
457         return ret;
458 }
459
460 static int
461 dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
462                   struct dlb2_unmap_qid_args *cfg)
463 {
464         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
465         struct dlb2_cmd_response response = {0};
466         int ret;
467
468         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
469
470         ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
471                                 handle->domain_id,
472                                 cfg,
473                                 &response,
474                                 false,
475                                 0);
476
477         cfg->response = response;
478
479         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
480                   __func__, ret);
481
482         return ret;
483 }
484
485 static int
486 dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
487                             struct dlb2_pending_port_unmaps_args *args)
488 {
489         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
490         struct dlb2_cmd_response response = {0};
491         int ret;
492
493         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
494
495         ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
496                                           handle->domain_id,
497                                           args,
498                                           &response,
499                                           false,
500                                           0);
501
502         args->response = response;
503
504         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
505                   __func__, ret);
506
507         return ret;
508 }
509
510 static int
511 dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
512                            struct dlb2_start_domain_args *cfg)
513 {
514         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
515         struct dlb2_cmd_response response = {0};
516         int ret;
517
518         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
519
520         ret = dlb2_pf_start_domain(&dlb2_dev->hw,
521                                    handle->domain_id,
522                                    cfg,
523                                    &response);
524
525         cfg->response = response;
526
527         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
528                   __func__, ret);
529
530         return ret;
531 }
532
533 static int
534 dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
535                             struct dlb2_get_ldb_queue_depth_args *args)
536 {
537         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
538         struct dlb2_cmd_response response = {0};
539         int ret;
540
541         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
542
543         ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
544                                           handle->domain_id,
545                                           args,
546                                           &response,
547                                           false,
548                                           0);
549
550         args->response = response;
551
552         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
553                   __func__, ret);
554
555         return ret;
556 }
557
558 static int
559 dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
560                             struct dlb2_get_dir_queue_depth_args *args)
561 {
562         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
563         struct dlb2_cmd_response response = {0};
564         int ret = 0;
565
566         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
567
568         ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
569                                           handle->domain_id,
570                                           args,
571                                           &response,
572                                           false,
573                                           0);
574
575         args->response = response;
576
577         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
578                   __func__, ret);
579
580         return ret;
581 }
582
/* Populate the dlb2_iface_* function pointers (declared in
 * ../dlb2_iface.h and used by the device-independent PMD code) with the
 * PF implementations defined above. Called once per device init.
 */
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	dlb2_iface_map_qid = dlb2_pf_map_qid;
	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
}
608
/* PCI DEV HOOKS */
/* Eventdev init hook invoked by the PCI probe helpers. In the primary
 * process this probes the PF hardware, parses any devargs, and runs the
 * full eventdev probe; a secondary process only attaches to the device
 * already probed by the primary. Returns 0 on success, negative on error.
 */
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	/* Defaults used when no devargs override them. */
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.cos_id = DLB2_COS_DEFAULT
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	/* Route all dlb2_iface_* calls to the PF implementations. */
	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
		/* Select v2.0 vs v2.5 behavior from the PCI device ID. */
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args,
						dlb2->version);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		/* Secondary process: attach only, no hardware probe. */
		dlb2 = dlb2_pmd_priv(eventdev);
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}
680
#define EVENTDEV_INTEL_VENDOR_ID 0x8086

/* PCI IDs matched by the DLB v2.0 PF driver. */
static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0, /* sentinel: end of table */
	},
};

/* PCI IDs matched by the DLB v2.5 PF driver. */
static const struct rte_pci_id pci_id_dlb2_5_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
	},
	{
		.vendor_id = 0, /* sentinel: end of table */
	},
};
702
703 static int
704 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
705                      struct rte_pci_device *pci_dev)
706 {
707         int ret;
708
709         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
710                                              sizeof(struct dlb2_eventdev),
711                                              dlb2_eventdev_pci_init,
712                                              event_dlb2_pf_name);
713         if (ret) {
714                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
715                                 "ret=%d\n", ret);
716         }
717
718         return ret;
719 }
720
721 static int
722 event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
723 {
724         int ret;
725
726         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
727
728         if (ret) {
729                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
730                                 "ret=%d\n", ret);
731         }
732
733         return ret;
734
735 }
736
737 static int
738 event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
739                        struct rte_pci_device *pci_dev)
740 {
741         int ret;
742
743         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
744                                             sizeof(struct dlb2_eventdev),
745                                             dlb2_eventdev_pci_init,
746                                             event_dlb2_pf_name);
747         if (ret) {
748                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
749                                 "ret=%d\n", ret);
750         }
751
752         return ret;
753 }
754
755 static int
756 event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
757 {
758         int ret;
759
760         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
761
762         if (ret) {
763                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
764                                 "ret=%d\n", ret);
765         }
766
767         return ret;
768
769 }
770
/* DLB v2.0 PF driver; NEED_MAPPING requests BAR mapping for MMIO access. */
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

/* DLB v2.5 PF driver; same flags and init path, separate PCI ID table. */
static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
	.id_table = pci_id_dlb2_5_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_5_pci_probe,
	.remove = event_dlb2_5_pci_remove,
};

/* Register both drivers and their PCI ID tables with the PCI bus. */
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);

RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);