app/test/virtual_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

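/*
 * Minimal virtual ethdev used by the unit tests (e.g. the link bonding
 * autotests). It has no hardware behind it: RX and TX are backed by a pair
 * of rte_rings held in the per-device private data below.
 */
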
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	/* rings standing in for the device's RX and TX packet paths */
	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	/* number of packets per burst rejected by the failing TX path */
	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

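/*
 * Most dev_ops callbacks are implemented in _success/_fail pairs so that
 * tests can exercise both the normal and the error paths of the code under
 * test; the *_fn_set_success() helpers further down select which variant a
 * port uses at runtime.
 */
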
static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* drain and free any packets still held in the internal rings */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

	return 0;
}

static int
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* drain and free any packets still held in the TX ring */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));

	return 0;
}

static int
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
			       __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.mac_addr_set = virtual_ethdev_mac_address_set,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

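/*
 * Test-control helpers: switch an individual dev_ops callback of a running
 * port between its success and failure implementation.
 */
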
void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}

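/*
 * Burst functions. The "success" variants move packets between the caller
 * and the internal rte_rings and update the ethdev statistics; the "fail"
 * variants model an RX path that returns no packets and a TX path that
 * refuses to send tx_burst_fail_count packets of each burst.
 */
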
static uint16_t
virtual_ethdev_rx_burst_success(void *queue __rte_unused,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts, NULL);

	/* increments ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increments ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts, NULL);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free packets in burst */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

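/*
 * Runtime controls used by tests: select which burst variant a port uses,
 * set how many packets the failing TX path should reject, and drive link
 * state changes (optionally delivering an LSC interrupt callback).
 */
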
void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
				      NULL);
}

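/*
 * Test-side access to the packet rings: inject mbufs for the next RX burst
 * to return, and retrieve mbufs that the virtual device has "transmitted".
 */
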
int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

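/*
 * Create a virtual ethdev backed by a dummy PCI device/driver pair.
 * Returns the new port id on success, or -1 on failure.
 *
 * Example usage (a sketch; the names are illustrative):
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 }
 *	};
 *	int port_id = virtual_ethdev_create("net_virt0", &mac,
 *			rte_socket_id(), 1);
 */
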
int
virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_dev->device.name = eth_dev->data->name;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	pci_dev->device.driver = &pci_drv->driver;
	eth_dev->device = &pci_dev->device;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	rte_eth_dev_probing_finish(eth_dev);

	return eth_dev->data->port_id;

err:
	/* free the packet rings as well as the allocations above */
	if (dev_private != NULL) {
		rte_ring_free(dev_private->rx_queue);
		rte_ring_free(dev_private->tx_queue);
	}
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}