event/cnxk: add timer arm routine
[dpdk.git] / drivers / event / dlb2 / pf / dlb2_pf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <sys/mman.h>
9 #include <fcntl.h>
10 #include <sys/time.h>
11 #include <errno.h>
12 #include <assert.h>
13 #include <unistd.h>
14 #include <string.h>
15
16 #include <rte_debug.h>
17 #include <rte_log.h>
18 #include <rte_dev.h>
19 #include <rte_devargs.h>
20 #include <rte_mbuf.h>
21 #include <rte_ring.h>
22 #include <rte_errno.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
25 #include <rte_cycles.h>
26 #include <rte_io.h>
27 #include <rte_pci.h>
28 #include <rte_bus_pci.h>
29 #include <rte_eventdev.h>
30 #include <eventdev_pmd.h>
31 #include <eventdev_pmd_pci.h>
32 #include <rte_memory.h>
33 #include <rte_string_fns.h>
34
35 #include "../dlb2_priv.h"
36 #include "../dlb2_iface.h"
37 #include "../dlb2_inline_fns.h"
38 #include "dlb2_main.h"
39 #include "base/dlb2_hw_types.h"
40 #include "base/dlb2_osdep.h"
41 #include "base/dlb2_resource.h"
42
/* Device name used when registering the PF eventdev with the EAL. */
static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
44
45 static void
46 dlb2_pf_low_level_io_init(void)
47 {
48         int i;
49         /* Addresses will be initialized at port create */
50         for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
51                 /* First directed ports */
52                 dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
53                 dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
54                 dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
55
56                 /* Now load balanced ports */
57                 dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
58                 dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
59                 dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
60         }
61 }
62
/* PF "open" hook: nothing to do for the PF backend, always succeeds. */
static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	RTE_SET_USED(name);
	RTE_SET_USED(handle);

	return 0;
}
71
72 static int
73 dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
74                            uint8_t *revision)
75 {
76         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
77
78         *revision = dlb2_dev->revision;
79
80         return 0;
81 }
82
83 static void
84 dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
85 {
86         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
87
88         dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
89         dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
90 }
91
92 static int
93 dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
94                           struct dlb2_get_num_resources_args *rsrcs)
95 {
96         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
97
98         return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
99 }
100
101 static int
102 dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
103                          enum dlb2_cq_poll_modes *mode)
104 {
105         RTE_SET_USED(handle);
106
107         *mode = DLB2_CQ_POLL_MODE_SPARSE;
108
109         return 0;
110 }
111
112 static int
113 dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
114                             struct dlb2_create_sched_domain_args *arg)
115 {
116         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
117         struct dlb2_cmd_response response = {0};
118         int ret;
119
120         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
121
122         if (dlb2_dev->domain_reset_failed) {
123                 response.status = DLB2_ST_DOMAIN_RESET_FAILED;
124                 ret = -EINVAL;
125                 goto done;
126         }
127
128         ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
129         if (ret)
130                 goto done;
131
132 done:
133
134         arg->response = response;
135
136         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
137                   __func__, ret);
138
139         return ret;
140 }
141
142 static void
143 dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
144 {
145         struct dlb2_dev *dlb2_dev;
146         int ret;
147
148         dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
149         ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
150         if (ret)
151                 DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
152 }
153
154 static int
155 dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
156                          struct dlb2_create_ldb_queue_args *cfg)
157 {
158         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
159         struct dlb2_cmd_response response = {0};
160         int ret;
161
162         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
163
164         ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
165                                        handle->domain_id,
166                                        cfg,
167                                        &response);
168
169         cfg->response = response;
170
171         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
172                   __func__, ret);
173
174         return ret;
175 }
176
177 static int
178 dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
179                          struct dlb2_get_sn_occupancy_args *args)
180 {
181         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
182         struct dlb2_cmd_response response = {0};
183         int ret;
184
185         ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
186                                                        args->group);
187
188         response.id = ret;
189         response.status = 0;
190
191         args->response = response;
192
193         return ret;
194 }
195
196 static int
197 dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
198                           struct dlb2_get_sn_allocation_args *args)
199 {
200         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
201         struct dlb2_cmd_response response = {0};
202         int ret;
203
204         ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);
205
206         response.id = ret;
207         response.status = 0;
208
209         args->response = response;
210
211         return ret;
212 }
213
214 static int
215 dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
216                           struct dlb2_set_sn_allocation_args *args)
217 {
218         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
219         struct dlb2_cmd_response response = {0};
220         int ret;
221
222         ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
223                                               args->num);
224
225         response.status = 0;
226
227         args->response = response;
228
229         return ret;
230 }
231
232 static void *
233 dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
234                             size_t size, int align)
235 {
236         char mz_name[RTE_MEMZONE_NAMESIZE];
237         uint32_t core_id = rte_lcore_id();
238         unsigned int socket_id;
239
240         snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
241                  (unsigned long)rte_get_timer_cycles());
242         if (core_id == (unsigned int)LCORE_ID_ANY)
243                 core_id = rte_get_main_lcore();
244         socket_id = rte_lcore_to_socket_id(core_id);
245         *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
246                                          RTE_MEMZONE_IOVA_CONTIG, align);
247         if (*mz == NULL) {
248                 DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
249                              size, rte_strerror(rte_errno));
250                 *phys = 0;
251                 return NULL;
252         }
253         *phys = (*mz)->iova;
254         return (*mz)->addr;
255 }
256
/* Create a load-balanced port: allocate and pin its CQ memory, create
 * the port in hardware, and record the port's producer-port (PP) and
 * CQ addresses in the global dlb2_port[] table.
 *
 * Returns 0 on success; on failure the CQ memzone is freed and a
 * negative errno-style value is returned. The hardware response is
 * copied into cfg->response on success only.
 */
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = false;	/* selects the LDB producer-port region */

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	/* In standard poll mode each CQ entry is a full dequeue QE; in
	 * sparse mode entries are spread one per cache line.
	 */
	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	/* Page-aligned, IOVA-contiguous memory for the hardware CQ. */
	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	/* Producer-port MMIO address: one page per port id within the
	 * LDB PP region of the function BAR.
	 */
	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	/* Keep the memzone handle so the port's CQ memory can be freed. */
	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	/* Release the CQ memzone on any failure after allocation. */
	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);
	return ret;
}
332
/* Create a directed port: allocate and pin its CQ memory, create the
 * port in hardware, and record the port's producer-port (PP) and CQ
 * addresses in the global dlb2_port[] table.
 *
 * Mirrors dlb2_pf_ldb_port_create() but targets the directed port
 * region (is_dir = true) and does not clamp the CQ depth to a minimum.
 * Returns 0 on success; on failure the CQ memzone is freed and a
 * negative errno-style value is returned.
 */
static int
dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_dir_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = true;	/* selects the DIR producer-port region */

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	/* In standard poll mode each CQ entry is a full dequeue QE; in
	 * sparse mode entries are spread one per cache line.
	 */
	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	alloc_sz = cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	/* Page-aligned, IOVA-contiguous memory for the hardware CQ. */
	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	/* Producer-port MMIO address: one page per port id within the
	 * DIR PP region of the function BAR.
	 */
	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
		(void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	/* Keep the memzone handle so the port's CQ memory can be freed. */
	dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	/* Release the CQ memzone on any failure after allocation. */
	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}
409
410 static int
411 dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
412                          struct dlb2_create_dir_queue_args *cfg)
413 {
414         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
415         struct dlb2_cmd_response response = {0};
416         int ret;
417
418         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
419
420         ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
421                                        handle->domain_id,
422                                        cfg,
423                                        &response);
424
425         cfg->response = response;
426
427         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
428                   __func__, ret);
429
430         return ret;
431 }
432
433 static int
434 dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
435                 struct dlb2_map_qid_args *cfg)
436 {
437         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
438         struct dlb2_cmd_response response = {0};
439         int ret;
440
441         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
442
443         ret = dlb2_hw_map_qid(&dlb2_dev->hw,
444                               handle->domain_id,
445                               cfg,
446                               &response,
447                               false,
448                               0);
449
450         cfg->response = response;
451
452         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
453                   __func__, ret);
454
455         return ret;
456 }
457
458 static int
459 dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
460                   struct dlb2_unmap_qid_args *cfg)
461 {
462         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
463         struct dlb2_cmd_response response = {0};
464         int ret;
465
466         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
467
468         ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
469                                 handle->domain_id,
470                                 cfg,
471                                 &response,
472                                 false,
473                                 0);
474
475         cfg->response = response;
476
477         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
478                   __func__, ret);
479
480         return ret;
481 }
482
483 static int
484 dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
485                             struct dlb2_pending_port_unmaps_args *args)
486 {
487         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
488         struct dlb2_cmd_response response = {0};
489         int ret;
490
491         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
492
493         ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
494                                           handle->domain_id,
495                                           args,
496                                           &response,
497                                           false,
498                                           0);
499
500         args->response = response;
501
502         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
503                   __func__, ret);
504
505         return ret;
506 }
507
508 static int
509 dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
510                            struct dlb2_start_domain_args *cfg)
511 {
512         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
513         struct dlb2_cmd_response response = {0};
514         int ret;
515
516         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
517
518         ret = dlb2_pf_start_domain(&dlb2_dev->hw,
519                                    handle->domain_id,
520                                    cfg,
521                                    &response);
522
523         cfg->response = response;
524
525         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
526                   __func__, ret);
527
528         return ret;
529 }
530
531 static int
532 dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
533                             struct dlb2_get_ldb_queue_depth_args *args)
534 {
535         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
536         struct dlb2_cmd_response response = {0};
537         int ret;
538
539         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
540
541         ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
542                                           handle->domain_id,
543                                           args,
544                                           &response,
545                                           false,
546                                           0);
547
548         args->response = response;
549
550         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
551                   __func__, ret);
552
553         return ret;
554 }
555
556 static int
557 dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
558                             struct dlb2_get_dir_queue_depth_args *args)
559 {
560         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
561         struct dlb2_cmd_response response = {0};
562         int ret = 0;
563
564         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
565
566         ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
567                                           handle->domain_id,
568                                           args,
569                                           &response,
570                                           false,
571                                           0);
572
573         args->response = response;
574
575         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
576                   __func__, ret);
577
578         return ret;
579 }
580
/* Wire the shared dlb2_iface_* function pointers (declared in
 * ../dlb2_iface.h) to this file's PF-specific implementations. Called
 * once at probe time, before any of the iface hooks are used.
 */
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	dlb2_iface_map_qid = dlb2_pf_map_qid;
	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
}
606
607 /* PCI DEV HOOKS */
/* Eventdev init hook invoked by rte_event_pmd_pci_probe_named().
 *
 * In the primary process: probes the PF hardware layer, parses any
 * device arguments, and completes the primary eventdev probe. In a
 * secondary process: only attaches to the already-probed device.
 *
 * Returns 0 on success or a negative errno-style value on failure.
 */
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	/* Defaults used when no devargs override them. */
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.cos_id = DLB2_COS_DEFAULT,
		.poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
		.sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
		.default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	/* Install the PF implementations of the shared iface hooks. */
	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
		/* Hardware version (v2 vs v2.5) derived from the PCI ID. */
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args,
						dlb2->version);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		/* Secondary process: attach only; no hardware probe and
		 * no devargs parsing here.
		 */
		dlb2 = dlb2_pmd_priv(eventdev);
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}
681
/* Intel PCI vendor ID. */
#define EVENTDEV_INTEL_VENDOR_ID 0x8086

/* PCI ID match table for DLB2 (v2.0) PF devices; zero-terminated. */
static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0,
	},
};

/* PCI ID match table for DLB 2.5 PF devices; zero-terminated. */
static const struct rte_pci_id pci_id_dlb2_5_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
	},
	{
		.vendor_id = 0,
	},
};
703
704 static int
705 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
706                      struct rte_pci_device *pci_dev)
707 {
708         int ret;
709
710         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
711                                              sizeof(struct dlb2_eventdev),
712                                              dlb2_eventdev_pci_init,
713                                              event_dlb2_pf_name);
714         if (ret) {
715                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
716                                 "ret=%d\n", ret);
717         }
718
719         return ret;
720 }
721
722 static int
723 event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
724 {
725         int ret;
726
727         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
728
729         if (ret) {
730                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
731                                 "ret=%d\n", ret);
732         }
733
734         return ret;
735
736 }
737
738 static int
739 event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
740                        struct rte_pci_device *pci_dev)
741 {
742         int ret;
743
744         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
745                                             sizeof(struct dlb2_eventdev),
746                                             dlb2_eventdev_pci_init,
747                                             event_dlb2_pf_name);
748         if (ret) {
749                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
750                                 "ret=%d\n", ret);
751         }
752
753         return ret;
754 }
755
756 static int
757 event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
758 {
759         int ret;
760
761         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
762
763         if (ret) {
764                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
765                                 "ret=%d\n", ret);
766         }
767
768         return ret;
769
770 }
771
/* PCI driver definition for DLB2 (v2.0) PF devices. */
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

/* PCI driver definition for DLB 2.5 PF devices. */
static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
	.id_table = pci_id_dlb2_5_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_5_pci_probe,
	.remove = event_dlb2_5_pci_remove,
};
785
/* Register both PF drivers and their PCI ID tables with the EAL. */
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);

RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);