eventdev: make PCI probe and remove functions optional
drivers/event/skeleton/skeleton_eventdev.c
/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */
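
/*
 * This name doubles as the vdev driver name registered at the bottom of
 * this file; an instance can therefore be created from the EAL command
 * line (for example with --vdev="event_skeleton").
 */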

static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
				uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
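
/*
 * The four stubs above are the fast-path entry points; a real PMD would
 * replace their bodies with device-specific enqueue/dequeue logic. They
 * are wired into the rte_eventdev structure in skeleton_eventdev_init()
 * and skeleton_eventdev_create() below.
 */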

static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}
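
/*
 * Note: the limits and capability flags reported above are illustrative
 * placeholders; a real driver would report the actual limits of its
 * hardware (or software) scheduler here.
 */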

static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
				 uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}

static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	/* With scale = 1 a tick equals a nanosecond; a real PMD would
	 * convert ns to ticks of its own timeout clock here.
	 */
	*timeout_ticks = ns * scale;

	return 0;
}

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get    = skeleton_eventdev_info_get,
	.dev_configure    = skeleton_eventdev_configure,
	.dev_start        = skeleton_eventdev_start,
	.dev_stop         = skeleton_eventdev_stop,
	.dev_close        = skeleton_eventdev_close,
	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
	.queue_setup      = skeleton_eventdev_queue_setup,
	.queue_release    = skeleton_eventdev_queue_release,
	.port_def_conf    = skeleton_eventdev_port_def_conf,
	.port_setup       = skeleton_eventdev_port_setup,
	.port_release     = skeleton_eventdev_port_release,
	.port_link        = skeleton_eventdev_port_link,
	.port_unlink      = skeleton_eventdev_port_unlink,
	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
	.dump             = skeleton_eventdev_dump
};

static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

fail:
	return ret;
}

/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID         0x177d
#define EVENTDEV_SKEL_PRODUCT_ID        0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};
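
/*
 * The probe and remove callbacks below are thin wrappers around the
 * common rte_event_pmd_pci_probe()/rte_event_pmd_pci_remove() helpers,
 * which allocate (or free) the eventdev and its private data and invoke
 * the driver-supplied init function.
 */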

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
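
/*
 * Hypothetical application snippet (not part of this driver): once an
 * instance has been created, e.g. via --vdev="event_skeleton", it can be
 * looked up and queried through the public eventdev API:
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_skeleton");
 *	struct rte_event_dev_info info;
 *
 *	if (dev_id >= 0 && rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("max_event_queues: %u\n", info.max_event_queues);
 */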