/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
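
/*
 * rte_eth_null.c: the "null" poll mode driver.
 *
 * A virtual ethdev that discards every transmitted packet and synthesizes
 * packets of a fixed size on receive, typically for benchmarking and
 * testing.  A sketch of typical usage, assuming the standard EAL vdev
 * syntax for this DPDK release:
 *
 *     testpmd ... --vdev 'eth_null0,size=64,copy=1'
 *
 * "size" sets the length of the dummy packets; "copy" enables a memcpy of
 * the packet payload on each rx/tx burst to mimic the cost of touching
 * packet data.
 */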

#include <stdio.h>
#include <string.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        unsigned numa_node;

        unsigned nb_rx_queues;
        unsigned nb_tx_queues;

        struct null_queue rx_null_queues[1];
        struct null_queue tx_null_queues[1];
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = 0
};

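/*
 * Receive burst for the default (no-copy) mode: allocate up to nb_bufs
 * mbufs from the queue's mempool and hand them back with their lengths
 * set to the configured packet size.  The payload is whatever the
 * mempool returns; nothing is written into it.
 */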
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

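/*
 * Receive burst for copy mode: same as eth_null_rx(), but the queue's
 * dummy packet is copied into each mbuf, so the payload is actually
 * written on every burst.
 */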
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

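/*
 * Transmit burst for the default mode: every mbuf is freed immediately
 * and counted as transmitted.
 */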
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

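/*
 * Transmit burst for copy mode: the payload of each mbuf is copied into
 * the queue's dummy packet before the mbuf is freed, so transmitted data
 * is actually read on every burst.
 */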
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        /* dereference the queue only after the NULL check above */
        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = 0;
}

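/*
 * Queue setup: the device exposes a single rx and a single tx queue.
 * Each queue gets a zeroed "dummy packet" buffer of the configured
 * packet size, used as the copy source/destination in copy mode.
 */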
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        if (rx_queue_id != 0)
                return -ENODEV;

        internals = dev->data->dev_private;
        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        if (tx_queue_id != 0)
                return -ENODEV;

        internals = dev->data->dev_private;
        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

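/*
 * Stats are kept as per-queue atomic counters in struct null_queue and
 * aggregated here into the standard rte_eth_stats layout, capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per direction.
 */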
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return;

        internal = dev->data->dev_private;
        memset(igb_stats, 0, sizeof(*igb_stats));
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                                        internal->nb_rx_queues);
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                                        internal->nb_tx_queues);
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < internal->nb_rx_queues; i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static struct eth_driver rte_null_pmd = {
        .pci_drv = {
                .name = "rte_null_pmd",
                .drv_flags = RTE_PCI_DRV_DETACHABLE,
        },
};

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        if (nq->dummy_packet)
                rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
};

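/*
 * Create one null ethdev: allocate the per-process ethdev data, a dummy
 * pci_dev (the null PMD has no real PCI device) and the private
 * internals, wire them together, and pick the rx/tx burst functions
 * according to the "copy" option.
 */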
static int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct rte_pci_device *pci_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for the eth_dev structure, the dummy
         * pci device and the internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
        if (pci_dev == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in pci_dev,
         * - point eth_dev_data to internals and pci_dev,
         * - and point the eth_dev structure at the new eth_dev_data structure
         */
        /* NOTE: we replace the data element originally allocated with eth_dev,
         * so that the null device's data stays local to this process */

        internals->nb_rx_queues = nb_rx_queues;
        internals->nb_tx_queues = nb_tx_queues;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->numa_node = numa_node;

        pci_dev->numa_node = numa_node;

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = &rte_null_pmd;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        /* rte_free() is a no-op on NULL pointers */
        rte_free(data);
        rte_free(pci_dev);
        rte_free(internals);
        return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

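/*
 * Device init: parse the optional "size" and "copy" kvargs and create
 * the ethdev.  For example (hypothetical values), a params string of
 * "size=2048,copy=1" creates a null device producing 2048-byte packets
 * with payload copies enabled.
 */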
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

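/*
 * Device uninit: look up the ethdev created for this name, free the
 * per-process data allocated in eth_dev_null_create() and release the
 * port.
 */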
static int
rte_pmd_null_devuninit(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry allocated for this name */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);
        rte_free(eth_dev->pci_dev);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_driver pmd_null_drv = {
        .name = "eth_null",
        .type = PMD_VDEV,
        .init = rte_pmd_null_devinit,
        .uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);