bond: fix mac assignment to slaves
[dpdk.git] app/test/virtual_pmd.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>

#include "virtual_pmd.h"

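/*
 * Stub ethdev driver used by the unit tests (for example the link bonding
 * tests) to emulate slave devices.  Both the control path (start, configure,
 * queue setup, link update) and the data path (rx/tx burst) can be switched
 * between "success" and "fail" implementations at runtime, and link state
 * changes can be injected, so tests can exercise error handling in the code
 * under test.
 */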
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

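/* Per-device test state: emulated statistics, the mbuf burst that the next
 * rx_burst call will deliver, and how many packets tx_burst should fail. */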
struct virtual_ethdev_private {
        struct rte_eth_stats eth_stats;

        struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST];
        int rx_pkt_burst_len;

        int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
        int port_id;
        int queue_id;
};

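/*
 * Each eth_dev_ops callback below comes in a "success" and a "fail" variant;
 * the virtual_ethdev_*_fn_set_success() helpers further down swap between
 * them so a test can force a given operation to fail.
 */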
static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
        eth_dev->data->dev_started = 1;

        return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
        eth_dev->data->dev_started = 0;

        return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
        eth_dev->data->dev_link.link_status = 0;
        eth_dev->data->dev_started = 0;
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
        return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
                struct rte_eth_dev_info *dev_info)
{
        dev_info->driver_name = virtual_ethdev_driver_name;
        dev_info->max_mac_addrs = 1;

        dev_info->max_rx_pktlen = (uint32_t)2048;

        dev_info->max_rx_queues = (uint16_t)128;
        dev_info->max_tx_queues = (uint16_t)512;

        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
                uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool __rte_unused)
{
        struct virtual_ethdev_queue *rx_q;

        rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
                        sizeof(struct virtual_ethdev_queue), 0, socket_id);

        if (rx_q == NULL)
                return -1;

        rx_q->port_id = dev->data->port_id;
        rx_q->queue_id = rx_queue_id;

        dev->data->rx_queues[rx_queue_id] = rx_q;

        return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
                uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool __rte_unused)
{
        return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
                uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct virtual_ethdev_queue *tx_q;

        tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
                        sizeof(struct virtual_ethdev_queue), 0, socket_id);

        if (tx_q == NULL)
                return -1;

        tx_q->port_id = dev->data->port_id;
        tx_q->queue_id = tx_queue_id;

        dev->data->tx_queues[tx_queue_id] = tx_q;

        return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
                uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *eth_dev,
                int wait_to_complete __rte_unused)
{
        if (!eth_dev->data->dev_started)
                eth_dev->data->dev_link.link_status = 0;

        return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *eth_dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return -1;
}

static void
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;

        if (stats)
                rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;

        dev_private->rx_pkt_burst_len = 0;

        /* Reset internal statistics */
        memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}

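/* Default callback table; virtual_ethdev_create() copies this into each
 * device's own dev_ops so the fn_set_success() helpers can patch individual
 * entries per device. */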
static struct eth_dev_ops virtual_ethdev_default_dev_ops = {
        .dev_configure = virtual_ethdev_configure_success,
        .dev_start = virtual_ethdev_start_success,
        .dev_stop = virtual_ethdev_stop,
        .dev_close = virtual_ethdev_close,
        .dev_infos_get = virtual_ethdev_info_get,
        .rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
        .tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
        .rx_queue_release = virtual_ethdev_rx_queue_release,
        .tx_queue_release = virtual_ethdev_tx_queue_release,
        .link_update = virtual_ethdev_link_update_success,
        .stats_get = virtual_ethdev_stats_get,
        .stats_reset = virtual_ethdev_stats_reset,
        .promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
        .promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

void
virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_success;
        else
                vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->dev_ops->dev_configure = virtual_ethdev_configure_success;
        else
                vrtl_eth_dev->dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->dev_ops->rx_queue_setup =
                                virtual_ethdev_rx_queue_setup_success;
        else
                vrtl_eth_dev->dev_ops->rx_queue_setup =
                                virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->dev_ops->tx_queue_setup =
                                virtual_ethdev_tx_queue_setup_success;
        else
                vrtl_eth_dev->dev_ops->tx_queue_setup =
                                virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->dev_ops->link_update = virtual_ethdev_link_update_success;
        else
                vrtl_eth_dev->dev_ops->link_update = virtual_ethdev_link_update_fail;
}

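/*
 * RX model: a test preloads a burst with virtual_ethdev_add_mbufs_to_rx_queue()
 * and the next rx_burst call on any queue of the device hands those mbufs
 * back, updating the emulated ipackets counter.
 */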
static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
                struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct rte_eth_dev *vrtl_eth_dev;
        struct virtual_ethdev_queue *pq_map;
        struct virtual_ethdev_private *dev_private;

        int rx_count = 0;
        int i;

        pq_map = (struct virtual_ethdev_queue *)queue;

        vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];

        dev_private = vrtl_eth_dev->data->dev_private;

        if (dev_private->rx_pkt_burst_len > 0) {
                /* Deliver the preloaded burst only if it fits in the
                 * caller's array; either way the stored burst is consumed. */
                if (dev_private->rx_pkt_burst_len <= nb_pkts) {
                        rx_count = dev_private->rx_pkt_burst_len;

                        for (i = 0; i < rx_count; i++) {
                                bufs[i] = dev_private->rx_pkt_burst[i];
                                dev_private->rx_pkt_burst[i] = NULL;
                        }

                        dev_private->eth_stats.ipackets += rx_count;
                }
                /* reset private burst values */
                dev_private->rx_pkt_burst_len = 0;
        }

        return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
                struct rte_mbuf **bufs __rte_unused,
                uint16_t nb_pkts __rte_unused)
{
        return 0;
}

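/* TX model: when the emulated link is up the burst is accepted and freed and
 * the opackets/obytes counters are updated; when it is down nothing is sent
 * and the caller keeps ownership of the mbufs. */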
static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;

        struct rte_eth_dev *vrtl_eth_dev;
        struct virtual_ethdev_private *dev_private;
        uint64_t obytes = 0;
        int i;

        for (i = 0; i < nb_pkts; i++)
                obytes += rte_pktmbuf_pkt_len(bufs[i]);

        vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
        dev_private = vrtl_eth_dev->data->dev_private;

        if (vrtl_eth_dev->data->dev_link.link_status) {
                /* increment opacket count */
                dev_private->eth_stats.opackets += nb_pkts;
                dev_private->eth_stats.obytes += obytes;

                /* free packets in burst */
                for (i = 0; i < nb_pkts; i++)
                        rte_pktmbuf_free(bufs[i]);

                return nb_pkts;
        }

        return 0;
}

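/* Failing TX variant: the first (nb_pkts - tx_burst_fail_count) packets are
 * "sent" (freed and counted) and the rest are reported as failed, so tests
 * can check partial-transmit handling.  If the whole burst would fail, zero
 * is returned and no mbufs are consumed. */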
static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct rte_eth_dev *vrtl_eth_dev = NULL;
        struct virtual_ethdev_queue *tx_q = NULL;
        struct virtual_ethdev_private *dev_private = NULL;

        int i;

        tx_q = (struct virtual_ethdev_queue *)queue;
        vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
        dev_private = vrtl_eth_dev->data->dev_private;

        if (dev_private->tx_burst_fail_count < nb_pkts) {
                int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

                /* increment opacket count */
                dev_private->eth_stats.opackets += successfully_txd;

                /* free packets in burst */
                for (i = 0; i < successfully_txd; i++) {
                        if (bufs[i] != NULL)
                                rte_pktmbuf_free(bufs[i]);

                        bufs[i] = NULL;
                }

                return successfully_txd;
        }

        return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
        else
                vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
        struct virtual_ethdev_private *dev_private = NULL;
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        dev_private = vrtl_eth_dev->data->dev_private;

        if (success)
                vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
        else
                vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

        dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
                uint8_t packet_fail_count)
{
        struct virtual_ethdev_private *dev_private = NULL;
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        dev_private = vrtl_eth_dev->data->dev_private;
        dev_private->tx_burst_fail_count = packet_fail_count;
}

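/* virtual_ethdev_set_link_status() just flips the link flag, while
 * virtual_ethdev_simulate_link_status_interrupt() additionally fires the
 * registered LSC callbacks, mimicking a link state change interrupt. */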
void
virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
                uint8_t link_status)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        vrtl_eth_dev->data->dev_link.link_status = link_status;

        _rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC);
}

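/* Preload the burst that the next successful rx_burst call will return.
 * Callers must keep burst_length within MAX_PKT_BURST; any previously
 * queued burst is overwritten. */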
void
virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
                struct rte_mbuf **pkt_burst, int burst_length)
{
        struct virtual_ethdev_private *dev_private = NULL;
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        int i;

        dev_private = vrtl_eth_dev->data->dev_private;

        for (i = 0; i < burst_length; i++)
                dev_private->rx_pkt_burst[i] = pkt_burst[i];

        dev_private->rx_pkt_burst_len = burst_length;
}

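/* Derive the socket count from the physical memory layout: the number of
 * sockets is the highest socket_id seen in any memseg, plus one. */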
static uint8_t
get_number_of_sockets(void)
{
        int sockets = 0;
        int i;
        const struct rte_memseg *ms = rte_eal_get_physmem_layout();

        for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
                if (sockets < ms[i].socket_id)
                        sockets = ms[i].socket_id;
        }

        /* Number of sockets = maximum socket_id + 1 */
        return ++sockets;
}

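/* Allocate and register a virtual ethdev: builds a dummy PCI device/driver
 * pair, a per-device copy of the default dev_ops table, a single MAC address
 * slot and the private test state, then wires up the success rx/tx burst
 * functions.  Returns the new port id, or -1 on failure. */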
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
                uint8_t socket_id, uint8_t isr_support)
{
        struct rte_pci_device *pci_dev = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct eth_driver *eth_drv = NULL;
        struct rte_pci_driver *pci_drv = NULL;
        struct eth_dev_ops *dev_ops = NULL;
        struct rte_pci_id *id_table = NULL;
        struct virtual_ethdev_private *dev_private = NULL;

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (dev_private) data
         */

        if (socket_id >= get_number_of_sockets())
                goto err;

        pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
        if (pci_dev == NULL)
                goto err;

        eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
        if (eth_drv == NULL)
                goto err;

        pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
        if (pci_drv == NULL)
                goto err;

        dev_ops = rte_zmalloc_socket(name, sizeof(*dev_ops), 0, socket_id);
        if (dev_ops == NULL)
                goto err;

        id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
        if (id_table == NULL)
                goto err;

        dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
        if (dev_private == NULL)
                goto err;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto err;

        pci_dev->numa_node = socket_id;
        pci_drv->name = virtual_ethdev_driver_name;
        pci_drv->id_table = id_table;

        if (isr_support)
                pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
        else
                pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

        eth_drv->pci_drv = *pci_drv;
        eth_dev->driver = eth_drv;

        eth_dev->data->nb_rx_queues = (uint16_t)1;
        eth_dev->data->nb_tx_queues = (uint16_t)1;

        TAILQ_INIT(&(eth_dev->callbacks));

        eth_dev->data->dev_link.link_status = 0;
        eth_dev->data->dev_link.link_speed = ETH_LINK_SPEED_10000;
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

        eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL)
                goto err;

        memcpy(eth_dev->data->mac_addrs, mac_addr,
                        sizeof(*eth_dev->data->mac_addrs));

        eth_dev->data->dev_started = 0;
        eth_dev->data->promiscuous = 0;
        eth_dev->data->scattered_rx = 0;
        eth_dev->data->all_multicast = 0;

        memset(dev_private, 0, sizeof(*dev_private));
        eth_dev->data->dev_private = dev_private;

        eth_dev->dev_ops = dev_ops;

        /* Copy default device operation functions */
        memcpy(eth_dev->dev_ops, &virtual_ethdev_default_dev_ops,
                        sizeof(*eth_dev->dev_ops));

        eth_dev->pci_dev = pci_dev;
        eth_dev->pci_dev->driver = &eth_drv->pci_drv;

        eth_dev->pci_dev->driver->id_table->device_id = 0xBEEF;

        eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
        eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

        return eth_dev->data->port_id;

err:
        /* rte_free() ignores NULL, so pointers that were never allocated
         * are safe to pass here */
        rte_free(pci_dev);
        rte_free(pci_drv);
        rte_free(eth_drv);
        rte_free(dev_ops);
        rte_free(id_table);
        rte_free(dev_private);

        return -1;
}
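/*
 * Illustrative usage from a test (a sketch only; the device name and MAC
 * value below are arbitrary, not taken from this repository):
 *
 *      struct ether_addr mac_addr = { .addr_bytes = {0, 0, 0, 0, 0, 1} };
 *      int port_id = virtual_ethdev_create("virt_eth_dev_0", &mac_addr,
 *                      rte_socket_id(), 1);
 *
 *      if (port_id >= 0)
 *              virtual_ethdev_simulate_link_status_interrupt(port_id, 1);
 */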