null: fix crash when added to bonding
drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

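/* Device arguments accepted when the driver is created as a vdev, e.g.
 * --vdev 'eth_null0,size=256,copy=1' ('eth_null0' is only an example
 * instance name): "size" sets the length in bytes of the synthetic
 * packets and "copy" enables a memcpy per packet on both rx and tx.
 */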
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

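/* Per-queue state: "received" mbufs are allocated from mb_pool,
 * dummy_packet backs the optional copy mode, and the counters are
 * updated atomically from the rx/tx burst functions.
 */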
struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        unsigned numa_node;

        unsigned nb_rx_queues;
        unsigned nb_tx_queues;

        struct null_queue rx_null_queues[1];
        struct null_queue tx_null_queues[1];
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = 0
};

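/* Rx burst: allocate up to nb_bufs mbufs from the queue's mempool and
 * return them as "received" packets of the configured size; the payload
 * is left untouched.
 */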
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

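/* Copy-mode rx burst: same as eth_null_rx, but also memcpy the queue's
 * dummy packet into each mbuf so the rx path touches the payload.
 */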
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

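/* Tx burst: drop every packet by freeing the mbufs back to their pool. */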
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

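/* Copy-mode tx burst: memcpy each payload into the queue's dummy packet
 * before freeing the mbuf, so the tx path also touches the data.
 */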
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = 0;
}

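/* Set up the single rx queue: record the mempool to allocate mbufs from
 * and reserve a zeroed dummy packet buffer for copy mode.
 */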
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        if (rx_queue_id != 0)
                return -ENODEV;

        internals = dev->data->dev_private;
        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

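/* Set up the single tx queue; like the rx path, it only needs a dummy
 * packet buffer for copy mode.
 */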
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        if (tx_queue_id != 0)
                return -ENODEV;

        internals = dev->data->dev_private;
        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

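/* Aggregate the per-queue atomic counters into the device-level stats. */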
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (stats == NULL))
                return;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(internal->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(internal->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += stats->q_opackets[i];
                tx_err_total += stats->q_errors[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_err_total;
}


static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static struct eth_driver rte_null_pmd = {
        .pci_drv = {
                .name = "rte_null_pmd",
                .drv_flags = RTE_PCI_DRV_DETACHABLE,
        },
};

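/* Release a queue's resources; the queue only owns its dummy packet. */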
static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
};

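/* Create a null ethdev: allocate the per-process data, a dummy PCI device
 * and the private internals, reserve an ethdev entry and wire up the
 * rx/tx burst functions (plain or copy variants).
 */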
static int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct rte_pci_device *pci_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for the eth_dev structure, a dummy
         * pci device and the internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
        if (pci_dev == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in pci_dev,
         * - point eth_dev_data to internals and pci_dev,
         * - and point the eth_dev structure to the new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev, so the nulls are local per-process */

        internals->nb_rx_queues = nb_rx_queues;
        internals->nb_tx_queues = nb_tx_queues;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->numa_node = numa_node;

        pci_dev->numa_node = numa_node;
        pci_dev->driver = &rte_null_pmd.pci_drv;

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = &rte_null_pmd;
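        /* Initialize the callback list before the port can be used; the
         * ethdev callback API walks link_intr_cbs (bonding registers a
         * callback when the port is added as a slave), and leaving it
         * uninitialized caused the crash this commit fixes.
         */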
        TAILQ_INIT(&eth_dev->link_intr_cbs);

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        rte_free(data);
        rte_free(pci_dev);
        rte_free(internals);

        return -1;
}

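/* kvargs handlers: parse the "size" and "copy" devargs values as unsigned
 * integers; a value that parses to UINT_MAX is rejected as out of range.
 */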
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

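/* vdev init: parse the optional "size" and "copy" arguments and create
 * the null ethdev on the caller's NUMA socket.
 */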
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

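/* vdev uninit: look up the port by name and free everything that
 * eth_dev_null_create allocated.
 */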
static int
rte_pmd_null_devuninit(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry allocated for this name */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);
        rte_free(eth_dev->pci_dev);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_driver pmd_null_drv = {
        .name = "eth_null",
        .type = PMD_VDEV,
        .init = rte_pmd_null_devinit,
        .uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);