/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
};

struct pmd_internals {
        unsigned int packet_size;
        unsigned int packet_copy;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct rte_ether_addr eth_addr;
        /** Bit mask of RSS offloads; the bit offset also encodes the flow type. */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_null_logtype, \
                "%s(): " fmt "\n", __func__, ##args)

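/*
 * No-copy RX burst: allocate fresh mbufs from the queue's mempool and hand
 * them up with the configured length, leaving the payload untouched.
 */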
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

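/*
 * Copying RX burst: same as eth_null_rx(), but also memcpy the per-queue
 * dummy packet into each mbuf so the payload bytes are actually written.
 */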
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

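/* No-copy TX burst: drop every frame by freeing it immediately. */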
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

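/*
 * Copying TX burst: read each frame's payload into the per-queue scratch
 * buffer before freeing it, so transmit-side memory traffic is exercised.
 */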
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

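/*
 * Queue setup allocates a zeroed per-queue dummy packet on the device's
 * NUMA node; the copy RX/TX paths use it as their scratch payload buffer.
 */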
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

        return 0;
}

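/*
 * Stats are aggregated from the per-queue atomic counters; per-queue entries
 * are reported for at most RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */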
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i, num_stats;
        unsigned long rx_total = 0, tx_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                tx_total += stats->q_opackets[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return -EINVAL;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
                internal->tx_null_queues[i].tx_pkts.cnt = 0;

        return 0;
}

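/* Release a queue: only the per-queue dummy packet needs freeing. */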
static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

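/*
 * RSS is only emulated: the RETA, hash-function mask and 40-byte key are
 * stored under rss_lock so they can be read back, but no hashing is done.
 */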
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct rte_ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

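/*
 * Build one null ethdev: allocate the vdev ethdev, fill pmd_internals
 * (packet size/copy mode, random MAC, default RSS state) and hook up the
 * RX/TX burst functions that match the requested copy mode.
 */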
static int
eth_dev_null_create(struct rte_vdev_device *dev,
                unsigned int packet_size,
                unsigned int packet_copy)
{
        const unsigned int nb_rx_queues = 1;
        const unsigned int nb_tx_queues = 1;
        struct rte_eth_dev_data *data;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
                dev->device.numa_node);

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev)
                return -ENOMEM;

        /* Now put it all together:
         * - store queue data in internals,
         * - store numa_node info in ethdev data,
         * - point eth_dev_data to internals,
         * - and point the eth_dev structure to the new eth_dev_data structure.
         * NOTE: the data element of the originally allocated eth_dev is
         * replaced, so the null queues are local to each process.
         */
        internals = eth_dev->data->dev_private;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;
        rte_eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data = eth_dev->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;

        eth_dev->dev_ops = &ops;

        /* Finally assign RX and TX ops. */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}

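/*
 * kvargs handlers below parse the "size"/"copy" values with strtoul()
 * (base 0, so decimal, octal and hex are all accepted).
 */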
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned int packet_size = default_packet_size;
        unsigned int packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        struct rte_eth_dev *eth_dev;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        PMD_LOG(INFO, "Initializing pmd_null for %s", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* mac_addrs must not be freed alone; it is part of dev_private */
                eth_dev->data->mac_addrs = NULL;

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");

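/*
 * Usage sketch (assumes the usual EAL --vdev syntax; application flags
 * vary), e.g. two null ports with 128-byte frames and copying enabled:
 *   testpmd -l 0-3 --vdev=net_null0,size=128,copy=1 \
 *           --vdev=net_null1,size=128,copy=1 -- -i
 */
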
RTE_INIT(eth_null_init_log)
{
        eth_null_logtype = rte_log_register("pmd.net.null");
        if (eth_null_logtype >= 0)
                rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}