/* drivers/net/null/rte_eth_null.c — DPDK null PMD (moved to drivers/net/) */
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (C) IGEL Co.,Ltd.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of IGEL Co.,Ltd. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
40
/* Devargs keys accepted by this PMD: "size" sets the synthetic packet
 * length, "copy" enables memcpy on the rx/tx fast paths. */
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

/* Defaults used when the corresponding devarg is absent. */
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

/* NULL-terminated list of recognized devarg keys, for rte_kvargs_parse(). */
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
52
struct pmd_internals;

/* Per-queue state; the same structure serves rx and tx queues. */
struct null_queue {
	struct pmd_internals *internals; /* back-pointer to the owning port */

	struct rte_mempool *mb_pool;    /* pool mbufs are allocated from (rx path) */
	/* Scratch buffer of packet_size bytes; allocated with rte_zmalloc_socket()
	 * as raw memory despite the mbuf pointer type — used only as a byte
	 * buffer by the copy rx/tx handlers. */
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;  /* packets produced on rx */
	rte_atomic64_t tx_pkts;  /* packets consumed on tx */
	rte_atomic64_t err_pkts; /* tx errors (never incremented in this file) */
};

/* Private data of a null ethdev port. */
struct pmd_internals {
	unsigned packet_size; /* length of synthetic rx packets / tx copies */
	unsigned packet_copy; /* non-zero: use the memcpy rx/tx handlers */
	unsigned numa_node;   /* socket all per-port memory is allocated on */

	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	/* Only a single rx and a single tx queue are supported. */
	struct null_queue rx_null_queues[1];
	struct null_queue tx_null_queues[1];
};


/* All null ports share this one all-zero MAC address. */
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
/* Fixed link: reported as 10G full duplex, initially down. */
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};
86
87 static uint16_t
88 eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
89 {
90         int i;
91         struct null_queue *h = q;
92         unsigned packet_size;
93
94         if ((q == NULL) || (bufs == NULL))
95                 return 0;
96
97         packet_size = h->internals->packet_size;
98         for (i = 0; i < nb_bufs; i++) {
99                 bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
100                 if (!bufs[i])
101                         break;
102                 bufs[i]->data_len = (uint16_t)packet_size;
103                 bufs[i]->pkt_len = packet_size;
104                 bufs[i]->nb_segs = 1;
105                 bufs[i]->next = NULL;
106         }
107
108         rte_atomic64_add(&(h->rx_pkts), i);
109
110         return i;
111 }
112
113 static uint16_t
114 eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
115 {
116         int i;
117         struct null_queue *h = q;
118         unsigned packet_size;
119
120         if ((q == NULL) || (bufs == NULL))
121                 return 0;
122
123         packet_size = h->internals->packet_size;
124         for (i = 0; i < nb_bufs; i++) {
125                 bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
126                 if (!bufs[i])
127                         break;
128                 rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
129                                         packet_size);
130                 bufs[i]->data_len = (uint16_t)packet_size;
131                 bufs[i]->pkt_len = packet_size;
132                 bufs[i]->nb_segs = 1;
133                 bufs[i]->next = NULL;
134         }
135
136         rte_atomic64_add(&(h->rx_pkts), i);
137
138         return i;
139 }
140
141 static uint16_t
142 eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
143 {
144         int i;
145         struct null_queue *h = q;
146
147         if ((q == NULL) || (bufs == NULL))
148                 return 0;
149
150         for (i = 0; i < nb_bufs; i++)
151                 rte_pktmbuf_free(bufs[i]);
152
153         rte_atomic64_add(&(h->tx_pkts), i);
154
155         return i;
156 }
157
158 static uint16_t
159 eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
160 {
161         int i;
162         struct null_queue *h = q;
163         unsigned packet_size;
164
165         if ((q == NULL) || (bufs == NULL))
166                 return 0;
167
168         packet_size = h->internals->packet_size;
169         for (i = 0; i < nb_bufs; i++) {
170                 rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
171                                         packet_size);
172                 rte_pktmbuf_free(bufs[i]);
173         }
174
175         rte_atomic64_add(&(h->tx_pkts), i);
176
177         return i;
178 }
179
180 static int
181 eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
182
183 static int
184 eth_dev_start(struct rte_eth_dev *dev)
185 {
186         if (dev == NULL)
187                 return -EINVAL;
188
189         dev->data->dev_link.link_status = 1;
190         return 0;
191 }
192
193 static void
194 eth_dev_stop(struct rte_eth_dev *dev)
195 {
196         if (dev == NULL)
197                 return;
198
199         dev->data->dev_link.link_status = 0;
200 }
201
202 static int
203 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
204                 uint16_t nb_rx_desc __rte_unused,
205                 unsigned int socket_id __rte_unused,
206                 const struct rte_eth_rxconf *rx_conf __rte_unused,
207                 struct rte_mempool *mb_pool)
208 {
209         struct rte_mbuf *dummy_packet;
210         struct pmd_internals *internals;
211         unsigned packet_size;
212
213         if ((dev == NULL) || (mb_pool == NULL))
214                 return -EINVAL;
215
216         if (rx_queue_id != 0)
217                 return -ENODEV;
218
219         internals = dev->data->dev_private;
220         packet_size = internals->packet_size;
221
222         internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
223         dev->data->rx_queues[rx_queue_id] =
224                 &internals->rx_null_queues[rx_queue_id];
225         dummy_packet = rte_zmalloc_socket(NULL,
226                         packet_size, 0, internals->numa_node);
227         if (dummy_packet == NULL)
228                 return -ENOMEM;
229
230         internals->rx_null_queues[rx_queue_id].internals = internals;
231         internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
232
233         return 0;
234 }
235
236 static int
237 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
238                 uint16_t nb_tx_desc __rte_unused,
239                 unsigned int socket_id __rte_unused,
240                 const struct rte_eth_txconf *tx_conf __rte_unused)
241 {
242         struct rte_mbuf *dummy_packet;
243         struct pmd_internals *internals;
244         unsigned packet_size;
245
246         if (dev == NULL)
247                 return -EINVAL;
248
249         if (tx_queue_id != 0)
250                 return -ENODEV;
251
252         internals = dev->data->dev_private;
253         packet_size = internals->packet_size;
254
255         dev->data->tx_queues[tx_queue_id] =
256                 &internals->tx_null_queues[tx_queue_id];
257         dummy_packet = rte_zmalloc_socket(NULL,
258                         packet_size, 0, internals->numa_node);
259         if (dummy_packet == NULL)
260                 return -ENOMEM;
261
262         internals->tx_null_queues[tx_queue_id].internals = internals;
263         internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
264
265         return 0;
266 }
267
268
269 static void
270 eth_dev_info(struct rte_eth_dev *dev,
271                 struct rte_eth_dev_info *dev_info)
272 {
273         struct pmd_internals *internals;
274
275         if ((dev == NULL) || (dev_info == NULL))
276                 return;
277
278         internals = dev->data->dev_private;
279         dev_info->driver_name = drivername;
280         dev_info->max_mac_addrs = 1;
281         dev_info->max_rx_pktlen = (uint32_t)-1;
282         dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
283         dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
284         dev_info->min_rx_bufsize = 0;
285         dev_info->pci_dev = NULL;
286 }
287
288 static void
289 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
290 {
291         unsigned i, num_stats;
292         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
293         const struct pmd_internals *internal;
294
295         if ((dev == NULL) || (igb_stats == NULL))
296                 return;
297
298         internal = dev->data->dev_private;
299         memset(igb_stats, 0, sizeof(*igb_stats));
300         num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
301                                         internal->nb_rx_queues);
302         for (i = 0; i < num_stats; i++) {
303                 igb_stats->q_ipackets[i] =
304                         internal->rx_null_queues[i].rx_pkts.cnt;
305                 rx_total += igb_stats->q_ipackets[i];
306         }
307
308         num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
309                                         internal->nb_tx_queues);
310         for (i = 0; i < num_stats; i++) {
311                 igb_stats->q_opackets[i] =
312                         internal->tx_null_queues[i].tx_pkts.cnt;
313                 igb_stats->q_errors[i] =
314                         internal->tx_null_queues[i].err_pkts.cnt;
315                 tx_total += igb_stats->q_opackets[i];
316                 tx_err_total += igb_stats->q_errors[i];
317         }
318
319         igb_stats->ipackets = rx_total;
320         igb_stats->opackets = tx_total;
321         igb_stats->oerrors = tx_err_total;
322 }
323
324 static void
325 eth_stats_reset(struct rte_eth_dev *dev)
326 {
327         unsigned i;
328         struct pmd_internals *internal;
329
330         if (dev == NULL)
331                 return;
332
333         internal = dev->data->dev_private;
334         for (i = 0; i < internal->nb_rx_queues; i++)
335                 internal->rx_null_queues[i].rx_pkts.cnt = 0;
336         for (i = 0; i < internal->nb_tx_queues; i++) {
337                 internal->tx_null_queues[i].tx_pkts.cnt = 0;
338                 internal->tx_null_queues[i].err_pkts.cnt = 0;
339         }
340 }
341
/* Minimal eth_driver shell: gives the ethdev layer a driver to point at
 * and marks the port detachable so devuninit can release it. */
static struct eth_driver rte_null_pmd = {
	.pci_drv = {
		.name = "rte_null_pmd",
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
	},
};
348
349 static void
350 eth_queue_release(void *q)
351 {
352         struct null_queue *nq;
353
354         if (q == NULL)
355                 return;
356
357         nq = q;
358         rte_free(nq->dummy_packet);
359 }
360
361 static int
362 eth_link_update(struct rte_eth_dev *dev __rte_unused,
363                 int wait_to_complete __rte_unused) { return 0; }
364
/* ethdev callback table shared by every null port. */
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
378
379 static int
380 eth_dev_null_create(const char *name,
381                 const unsigned numa_node,
382                 unsigned packet_size,
383                 unsigned packet_copy)
384 {
385         const unsigned nb_rx_queues = 1;
386         const unsigned nb_tx_queues = 1;
387         struct rte_eth_dev_data *data = NULL;
388         struct rte_pci_device *pci_dev = NULL;
389         struct pmd_internals *internals = NULL;
390         struct rte_eth_dev *eth_dev = NULL;
391
392         if (name == NULL)
393                 return -EINVAL;
394
395         RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
396                         numa_node);
397
398         /* now do all data allocation - for eth_dev structure, dummy pci driver
399          * and internal (private) data
400          */
401         data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
402         if (data == NULL)
403                 goto error;
404
405         pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
406         if (pci_dev == NULL)
407                 goto error;
408
409         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
410         if (internals == NULL)
411                 goto error;
412
413         /* reserve an ethdev entry */
414         eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
415         if (eth_dev == NULL)
416                 goto error;
417
418         /* now put it all together
419          * - store queue data in internals,
420          * - store numa_node info in pci_driver
421          * - point eth_dev_data to internals and pci_driver
422          * - and point eth_dev structure to new eth_dev_data structure
423          */
424         /* NOTE: we'll replace the data element, of originally allocated eth_dev
425          * so the nulls are local per-process */
426
427         internals->nb_rx_queues = nb_rx_queues;
428         internals->nb_tx_queues = nb_tx_queues;
429         internals->packet_size = packet_size;
430         internals->packet_copy = packet_copy;
431         internals->numa_node = numa_node;
432
433         pci_dev->numa_node = numa_node;
434
435         data->dev_private = internals;
436         data->port_id = eth_dev->data->port_id;
437         data->nb_rx_queues = (uint16_t)nb_rx_queues;
438         data->nb_tx_queues = (uint16_t)nb_tx_queues;
439         data->dev_link = pmd_link;
440         data->mac_addrs = &eth_addr;
441         strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));
442
443         eth_dev->data = data;
444         eth_dev->dev_ops = &ops;
445         eth_dev->pci_dev = pci_dev;
446         eth_dev->driver = &rte_null_pmd;
447
448         /* finally assign rx and tx ops */
449         if (packet_copy) {
450                 eth_dev->rx_pkt_burst = eth_null_copy_rx;
451                 eth_dev->tx_pkt_burst = eth_null_copy_tx;
452         } else {
453                 eth_dev->rx_pkt_burst = eth_null_rx;
454                 eth_dev->tx_pkt_burst = eth_null_tx;
455         }
456
457         return 0;
458
459 error:
460         rte_free(data);
461         rte_free(pci_dev);
462         rte_free(internals);
463
464         return -1;
465 }
466
/**
 * kvargs handler for the "size" devarg.
 *
 * Parses @value as an unsigned integer (base auto-detected) into
 * *extra_args.
 *
 * Fix: the original called strtoul() with no errno/endptr checking, so
 * garbage input ("abc") silently parsed as 0 and values above UINT_MAX
 * were silently truncated on LP64; both are now rejected.
 *
 * @return 0 on success, -EINVAL on NULL/unparsable input, -1 on overflow.
 */
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	unsigned *packet_size = extra_args;
	unsigned long parsed;
	char *end = NULL;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	errno = 0;
	parsed = strtoul(value, &end, 0);
	if (errno != 0 || end == value || *end != '\0')
		return -EINVAL;
	if (parsed >= UINT_MAX)
		return -1;

	*packet_size = (unsigned)parsed;

	return 0;
}
483
/**
 * kvargs handler for the "copy" devarg.
 *
 * Parses @value as an unsigned integer (base auto-detected) into
 * *extra_args; any non-zero result enables copy mode.
 *
 * Fix: the original called strtoul() with no errno/endptr checking, so
 * garbage input silently parsed as 0 and values above UINT_MAX were
 * silently truncated on LP64; both are now rejected.
 *
 * @return 0 on success, -EINVAL on NULL/unparsable input, -1 on overflow.
 */
static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	unsigned *packet_copy = extra_args;
	unsigned long parsed;
	char *end = NULL;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	errno = 0;
	parsed = strtoul(value, &end, 0);
	if (errno != 0 || end == value || *end != '\0')
		return -EINVAL;
	if (parsed >= UINT_MAX)
		return -1;

	*packet_copy = (unsigned)parsed;

	return 0;
}
500
501 static int
502 rte_pmd_null_devinit(const char *name, const char *params)
503 {
504         unsigned numa_node;
505         unsigned packet_size = default_packet_size;
506         unsigned packet_copy = default_packet_copy;
507         struct rte_kvargs *kvlist = NULL;
508         int ret;
509
510         if (name == NULL)
511                 return -EINVAL;
512
513         RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
514
515         numa_node = rte_socket_id();
516
517         if (params != NULL) {
518                 kvlist = rte_kvargs_parse(params, valid_arguments);
519                 if (kvlist == NULL)
520                         return -1;
521
522                 if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
523
524                         ret = rte_kvargs_process(kvlist,
525                                         ETH_NULL_PACKET_SIZE_ARG,
526                                         &get_packet_size_arg, &packet_size);
527                         if (ret < 0)
528                                 goto free_kvlist;
529                 }
530
531                 if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
532
533                         ret = rte_kvargs_process(kvlist,
534                                         ETH_NULL_PACKET_COPY_ARG,
535                                         &get_packet_copy_arg, &packet_copy);
536                         if (ret < 0)
537                                 goto free_kvlist;
538                 }
539         }
540
541         RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
542                         "packet copy is %s\n", packet_size,
543                         packet_copy ? "enabled" : "disabled");
544
545         ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);
546
547 free_kvlist:
548         if (kvlist)
549                 rte_kvargs_free(kvlist);
550         return ret;
551 }
552
553 static int
554 rte_pmd_null_devuninit(const char *name)
555 {
556         struct rte_eth_dev *eth_dev = NULL;
557
558         if (name == NULL)
559                 return -EINVAL;
560
561         RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
562                         rte_socket_id());
563
564         /* reserve an ethdev entry */
565         eth_dev = rte_eth_dev_allocated(name);
566         if (eth_dev == NULL)
567                 return -1;
568
569         rte_free(eth_dev->data->dev_private);
570         rte_free(eth_dev->data);
571         rte_free(eth_dev->pci_dev);
572
573         rte_eth_dev_release_port(eth_dev);
574
575         return 0;
576 }
577
/* Virtual-device driver registration: "--vdev=eth_null" devargs create
 * ports via rte_pmd_null_devinit(). */
static struct rte_driver pmd_null_drv = {
	.name = "eth_null",
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
	.uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);