81d4be3a961842323c37dc839d7459dff7b00229
[dpdk.git] / app / test / virtual_pmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_mbuf.h>
35 #include <rte_ethdev.h>
36 #include <rte_malloc.h>
37 #include <rte_memcpy.h>
38 #include <rte_memory.h>
39
40 #include "virtual_pmd.h"
41
/* Maximum number of mbufs that can be staged on the simulated RX queue. */
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

/* Per-device private state of the virtual PMD. */
struct virtual_ethdev_private {
	/* Counters returned by the stats_get dev op. */
	struct rte_eth_stats eth_stats;

	/* Mbufs staged via virtual_ethdev_add_mbufs_to_rx_queue(), handed
	 * out by the RX burst callback. */
	struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST];
	int rx_pkt_burst_len;

	/* Number of packets per burst the failing TX callback refuses. */
	int tx_burst_fail_count;
};

/* Identifies which port and queue a queue object belongs to. */
struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};
59
60 static int
61 virtual_ethdev_start_success(struct rte_eth_dev *eth_dev __rte_unused)
62 {
63         eth_dev->data->dev_started = 1;
64
65         return 0;
66 }
67
68 static int
69 virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev __rte_unused)
70 {
71         eth_dev->data->dev_started = 0;
72
73         return -1;
74 }
75 static void  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
76 {
77         eth_dev->data->dev_link.link_status = 0;
78         eth_dev->data->dev_started = 0;
79 }
80
81 static void
82 virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
83 {}
84
85 static int
86 virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
87 {
88         return 0;
89 }
90
91 static int
92 virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
93 {
94         return -1;
95 }
96
97 static void
98 virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
99                 struct rte_eth_dev_info *dev_info)
100 {
101         dev_info->driver_name = virtual_ethdev_driver_name;
102         dev_info->max_mac_addrs = 1;
103
104         dev_info->max_rx_pktlen = (uint32_t)2048;
105
106         dev_info->max_rx_queues = (uint16_t)128;
107         dev_info->max_tx_queues = (uint16_t)512;
108
109         dev_info->min_rx_bufsize = 0;
110         dev_info->pci_dev = NULL;
111 }
112
113 static int
114 virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
115                 uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
116                 unsigned int socket_id,
117                 const struct rte_eth_rxconf *rx_conf __rte_unused,
118                 struct rte_mempool *mb_pool __rte_unused)
119 {
120         struct virtual_ethdev_queue *rx_q;
121
122         rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
123                         sizeof(struct virtual_ethdev_queue), 0, socket_id);
124
125         if (rx_q == NULL)
126                 return -1;
127
128         rx_q->port_id = dev->data->port_id;
129         rx_q->queue_id = rx_queue_id;
130
131         dev->data->rx_queues[rx_queue_id] = rx_q;
132
133         return 0;
134 }
135
136 static int
137 virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
138                 uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
139                 unsigned int socket_id __rte_unused,
140                 const struct rte_eth_rxconf *rx_conf __rte_unused,
141                 struct rte_mempool *mb_pool __rte_unused)
142 {
143         return -1;
144 }
145
146 static int
147 virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
148                 uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
149                 unsigned int socket_id,
150                 const struct rte_eth_txconf *tx_conf __rte_unused)
151 {
152         struct virtual_ethdev_queue *tx_q;
153
154         tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
155                         sizeof(struct virtual_ethdev_queue), 0, socket_id);
156
157         if (tx_q == NULL)
158                 return -1;
159
160         tx_q->port_id = dev->data->port_id;
161         tx_q->queue_id = tx_queue_id;
162
163         dev->data->tx_queues[tx_queue_id] = tx_q;
164
165         return 0;
166 }
167
168 static int
169 virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
170                 uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
171                 unsigned int socket_id __rte_unused,
172                 const struct rte_eth_txconf *tx_conf __rte_unused)
173 {
174         return -1;
175 }
176
/* rx_queue_release op: frees the per-queue context allocated by the
 * matching rx_queue_setup op.
 * Fix: the context was previously leaked (empty release); rte_free(NULL)
 * is a no-op, so this is safe for queues that were never set up. */
static void
virtual_ethdev_rx_queue_release(void *q)
{
	rte_free(q);
}
181
/* tx_queue_release op: frees the per-queue context allocated by the
 * matching tx_queue_setup op.
 * Fix: the context was previously leaked (empty release); rte_free(NULL)
 * is a no-op, so this is safe for queues that were never set up. */
static void
virtual_ethdev_tx_queue_release(void *q)
{
	rte_free(q);
}
186
187 static int
188 virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
189                 int wait_to_complete __rte_unused)
190 {
191         if (!bonded_eth_dev->data->dev_started)
192                 bonded_eth_dev->data->dev_link.link_status = 0;
193
194         return 0;
195 }
196
197 static int
198 virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
199                 int wait_to_complete __rte_unused)
200 {
201         return -1;
202 }
203
204 static void
205 virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
206 {
207         struct virtual_ethdev_private *dev_private = dev->data->dev_private;
208
209         if (stats)
210                 rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
211 }
212
213 static void
214 virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
215 {
216         struct virtual_ethdev_private *dev_private = dev->data->dev_private;
217
218         dev_private->rx_pkt_burst_len = 0;
219
220         /* Reset internal statistics */
221         memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
222 }
223
224 static void
225 virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
226 {}
227
228 static void
229 virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
230 {}
231
232
233 static struct eth_dev_ops virtual_ethdev_default_dev_ops = {
234                 .dev_configure = virtual_ethdev_configure_success,
235                 .dev_start = virtual_ethdev_start_success,
236                 .dev_stop = virtual_ethdev_stop,
237                 .dev_close = virtual_ethdev_close,
238                 .dev_infos_get = virtual_ethdev_info_get,
239                 .rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
240                 .tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
241                 .rx_queue_release = virtual_ethdev_rx_queue_release,
242                 .tx_queue_release = virtual_ethdev_tx_queue_release,
243                 .link_update = virtual_ethdev_link_update_success,
244                 .stats_get = virtual_ethdev_stats_get,
245                 .stats_reset = virtual_ethdev_stats_reset,
246                 .promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
247                 .promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
248 };
249
250
251 void
252 virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
253 {
254         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
255
256         if (success)
257                 vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_success;
258         else
259                 vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_fail;
260
261 }
262
263 void
264 virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
265 {
266         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
267
268         if (success)
269                 vrtl_eth_dev->dev_ops->dev_configure = virtual_ethdev_configure_success;
270         else
271                 vrtl_eth_dev->dev_ops->dev_configure = virtual_ethdev_configure_fail;
272 }
273
274 void
275 virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
276 {
277         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
278
279         if (success)
280                 vrtl_eth_dev->dev_ops->rx_queue_setup =
281                                 virtual_ethdev_rx_queue_setup_success;
282         else
283                 vrtl_eth_dev->dev_ops->rx_queue_setup =
284                                 virtual_ethdev_rx_queue_setup_fail;
285 }
286
287 void
288 virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
289 {
290         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
291
292         if (success)
293                 vrtl_eth_dev->dev_ops->tx_queue_setup =
294                                 virtual_ethdev_tx_queue_setup_success;
295         else
296                 vrtl_eth_dev->dev_ops->tx_queue_setup =
297                                 virtual_ethdev_tx_queue_setup_fail;
298 }
299
300 void
301 virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
302 {
303         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
304
305         if (success)
306                 vrtl_eth_dev->dev_ops->link_update = virtual_ethdev_link_update_success;
307         else
308                 vrtl_eth_dev->dev_ops->link_update = virtual_ethdev_link_update_fail;
309 }
310
311
/*
 * RX burst callback: hands the mbufs staged via
 * virtual_ethdev_add_mbufs_to_rx_queue() to the caller.
 *
 * NOTE(review): the staged burst is delivered only when it is strictly
 * smaller than nb_pkts; otherwise it is dropped without freeing the mbufs
 * (leak) -- presumably acceptable for this test harness, but worth
 * confirming against the tests that use it.
 * NOTE(review): eth_stats.ipackets is overwritten (not accumulated) and
 * doubles as the return value, so a poll with nothing staged returns the
 * size of the previous burst even though bufs[] was not written.
 */
static uint16_t
virtual_ethdev_rx_burst_success(void *queue __rte_unused,
							 struct rte_mbuf **bufs,
							 uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int i;

	pq_map = (struct virtual_ethdev_queue *)queue;

	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->rx_pkt_burst_len > 0) {
		if (dev_private->rx_pkt_burst_len < nb_pkts) {

			/* transfer ownership of the staged mbufs to bufs[] */
			for (i = 0; i < dev_private->rx_pkt_burst_len; i++) {
				bufs[i] = dev_private->rx_pkt_burst[i];
				dev_private->rx_pkt_burst[i] = NULL;
			}

			dev_private->eth_stats.ipackets = dev_private->rx_pkt_burst_len;
		}
		/* reset private burst values */
		dev_private->rx_pkt_burst_len = 0;
	}

	return dev_private->eth_stats.ipackets;
}
345
346 static uint16_t
347 virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
348                                                          struct rte_mbuf **bufs __rte_unused,
349                                                          uint16_t nb_pkts __rte_unused)
350 {
351         return 0;
352 }
353
354 static uint16_t
355 virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
356                 uint16_t nb_pkts)
357 {
358         struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;
359
360         struct rte_eth_dev *vrtl_eth_dev;
361         struct virtual_ethdev_private *dev_private;
362
363         int i;
364
365         vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
366         dev_private = vrtl_eth_dev->data->dev_private;
367
368         if (vrtl_eth_dev->data->dev_link.link_status) {
369                 /* increment opacket count */
370                 dev_private->eth_stats.opackets += nb_pkts;
371
372                 /* free packets in burst */
373                 for (i = 0; i < nb_pkts; i++)
374                         rte_pktmbuf_free(bufs[i]);
375
376                 return nb_pkts;
377         }
378
379         return 0;
380 }
381
382 static uint16_t
383 virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
384                 uint16_t nb_pkts)
385 {
386         struct rte_eth_dev *vrtl_eth_dev = NULL;
387         struct virtual_ethdev_queue *tx_q = NULL;
388         struct virtual_ethdev_private *dev_private = NULL;
389
390         int i;
391
392         tx_q = (struct virtual_ethdev_queue *)queue;
393         vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
394         dev_private = vrtl_eth_dev->data->dev_private;
395
396         if (dev_private->tx_burst_fail_count < nb_pkts) {
397                 int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;
398
399                 /* increment opacket count */
400                 dev_private->eth_stats.opackets += successfully_txd;
401
402                 /* free packets in burst */
403                 for (i = 0; i < successfully_txd; i++) {
404                         /* free packets in burst */
405                         if (bufs[i] != NULL)
406                                 rte_pktmbuf_free(bufs[i]);
407
408                         bufs[i] = NULL;
409                 }
410
411                 return successfully_txd;
412         }
413
414         return 0;
415 }
416
417
418 void
419 virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
420 {
421         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
422
423         if (success)
424                 vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
425         else
426                 vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
427 }
428
429
430 void
431 virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
432 {
433         struct virtual_ethdev_private *dev_private = NULL;
434         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
435
436         dev_private = vrtl_eth_dev->data->dev_private;
437
438         if (success)
439                 vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
440         else
441                 vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;
442
443         dev_private->tx_burst_fail_count = 0;
444 }
445
446 void
447 virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
448                 uint8_t packet_fail_count)
449 {
450         struct virtual_ethdev_private *dev_private = NULL;
451         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
452
453
454         dev_private = vrtl_eth_dev->data->dev_private;
455         dev_private->tx_burst_fail_count = packet_fail_count;
456 }
457
458 void
459 virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
460                 uint8_t link_status)
461 {
462         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
463
464         vrtl_eth_dev->data->dev_link.link_status = link_status;
465
466         _rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC);
467 }
468
469
470
471 void
472 virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
473                 struct rte_mbuf **pkt_burst, int burst_length)
474 {
475         struct virtual_ethdev_private *dev_private = NULL;
476         struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
477
478         int i;
479
480         dev_private = vrtl_eth_dev->data->dev_private;
481
482         for (i = 0; i < burst_length; i++)
483                 dev_private->rx_pkt_burst[i] = pkt_burst[i];
484
485         dev_private->rx_pkt_burst_len = burst_length;
486 }
487
488 static uint8_t
489 get_number_of_sockets(void)
490 {
491         int sockets = 0;
492         int i;
493         const struct rte_memseg *ms = rte_eal_get_physmem_layout();
494
495         for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
496                 if (sockets < ms[i].socket_id)
497                         sockets = ms[i].socket_id;
498         }
499         /* Number of sockets = maximum socket_id + 1 */
500         return ++sockets;
501 }
502
503
/*
 * Create a virtual ethdev on the given NUMA socket with the given name
 * and MAC address.  Allocates a dummy PCI device/driver, an op table
 * copied from virtual_ethdev_default_dev_ops, and the private state, then
 * wires them into a freshly reserved ethdev entry.
 *
 * Returns the new port id on success, -1 on failure.
 *
 * NOTE(review): on failures after rte_eth_dev_allocate() the ethdev entry
 * is never released, and structures already linked into eth_dev (eth_drv,
 * dev_ops, mac_addrs path) are freed while eth_dev still points at them
 * -- acceptable only because test runs abort on this path; verify before
 * reusing this code elsewhere.
 */
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct eth_dev_ops *dev_ops = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;


	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	if (socket_id >= get_number_of_sockets())
		goto err;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	dev_ops = rte_zmalloc_socket(name, sizeof(*dev_ops), 0, socket_id);
	if (dev_ops == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->numa_node = socket_id;
	pci_drv->name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;
	/* advertise link-status-change interrupt support so LSC callbacks
	 * can be simulated */
	pci_drv->drv_flags = RTE_PCI_DRV_INTR_LSC;

	eth_drv->pci_drv = (struct rte_pci_driver)(*pci_drv);
	eth_dev->driver = eth_drv;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->callbacks));

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_link.link_speed = ETH_LINK_SPEED_10000;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));
	/* make the MAC unique per port by stamping the port id into the
	 * last octet */
	eth_dev->data->mac_addrs->addr_bytes[5] = eth_dev->data->port_id;

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	memset(dev_private, 0, sizeof(*dev_private));
	eth_dev->data->dev_private = dev_private;

	eth_dev->dev_ops = dev_ops;

	/* Copy default device operation functions */
	memcpy(eth_dev->dev_ops, &virtual_ethdev_default_dev_ops,
			sizeof(*eth_dev->dev_ops));

	eth_dev->pci_dev = pci_dev;
	eth_dev->pci_dev->driver = &eth_drv->pci_drv;

	/* arbitrary non-zero device id for the dummy PCI id table */
	eth_dev->pci_dev->driver->id_table->device_id = 0xBEEF;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	/* rte_free(NULL) is a no-op, so the guards below are belt-and-braces */
	if (pci_dev)
		rte_free(pci_dev);
	if (pci_drv)
		rte_free(pci_drv);
	if (eth_drv)
		rte_free(eth_drv);
	if (dev_ops)
		rte_free(dev_ops);
	if (id_table)
		rte_free(id_table);
	if (dev_private)
		rte_free(dev_private);

	return -1;
}