null: extend number of virtual queues
drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>	/* snprintf() */
#include <stdlib.h>	/* strtoul() */
#include <limits.h>	/* UINT_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

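/*
 * Per-queue state. RX queues allocate mbufs from mb_pool; the copy-mode
 * burst handlers use dummy_packet as the packet-data source (RX) or
 * sink (TX). Counters are atomic so stats can be read from any lcore.
 */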
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

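/*
 * Per-device private data: the configured packet size and copy mode,
 * the NUMA node used for allocations, and fixed-size arrays holding
 * the RX/TX queue state.
 */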
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	unsigned numa_node;

	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};

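/*
 * RX burst, no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * mempool and return them with their length set to the configured packet
 * size. The payload is whatever the mempool buffer already contains.
 */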
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

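/*
 * RX burst, copy mode: same as eth_null_rx, but additionally memcpy the
 * queue's dummy packet into each mbuf so the payload is deterministic.
 * Assumes the mempool's data room is at least packet_size bytes.
 */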
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

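/*
 * TX burst, no-copy mode: drop the burst by freeing every mbuf and
 * count the packets as transmitted.
 */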
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

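/*
 * TX burst, copy mode: read each packet's data into the queue's dummy
 * packet before freeing the mbuf, to model the cost of a packet copy.
 */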
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		/* don't read past the mbuf data if the packet is shorter
		 * than the configured packet size */
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
				RTE_MIN(packet_size,
					(unsigned)rte_pktmbuf_data_len(bufs[i])));
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals;

	internals = dev->data->dev_private;
	internals->nb_rx_queues = dev->data->nb_rx_queues;
	internals->nb_tx_queues = dev->data->nb_tx_queues;

	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = 1;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = 0;
}

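/*
 * Set up an RX queue: record the mempool to allocate from and allocate
 * a zeroed dummy packet buffer of packet_size bytes on the device's
 * NUMA node (used by the copy-mode RX path).
 */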
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= internals->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

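/*
 * Set up a TX queue: same as the RX path minus the mempool, allocating
 * the dummy packet buffer used by the copy-mode TX path.
 */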
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= internals->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

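/*
 * Aggregate the per-queue counters into the device stats. Per-queue
 * entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */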
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(internal->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(internal->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static struct eth_driver rte_null_pmd = {
	.pci_drv = {
		.name = "rte_null_pmd",
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
	},
};

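/* Release a queue's dummy packet buffer; shared by RX and TX queues. */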
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

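/*
 * Create a null ethdev: allocate the per-process data, a dummy PCI
 * device and the private struct on the requested NUMA node, wire them
 * into a freshly allocated ethdev, and select the copy or no-copy
 * burst handlers.
 */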
static int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct rte_pci_device *pci_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
	if (pci_dev == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in pci_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the null device's data is local per-process */

	internals->nb_rx_queues = nb_rx_queues;
	internals->nb_tx_queues = nb_tx_queues;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->numa_node = numa_node;

	pci_dev->numa_node = numa_node;
	pci_dev->driver = &rte_null_pmd.pci_drv;

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	/* snprintf rather than strncpy: guarantees NUL termination */
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = &rte_null_pmd;
	TAILQ_INIT(&eth_dev->link_intr_cbs);

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(pci_dev);
	rte_free(internals);

	return -1;
}

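/* kvargs handlers: parse the "size" and "copy" device arguments. */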
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

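/*
 * Device init entry point, e.g. via the EAL vdev option:
 *   --vdev 'eth_null0,size=128,copy=1'
 */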
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

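/* Device uninit entry point: free everything eth_dev_null_create set up. */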
static int
rte_pmd_null_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);
	rte_free(eth_dev->pci_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_driver pmd_null_drv = {
	.name = "eth_null",
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
	.uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);