drivers/net/null/rte_eth_null.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>     /* strtoul() */
#include <limits.h>     /* UINT_MAX */
#include <errno.h>      /* EINVAL, ENODEV, ENOMEM */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"
static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
};

struct pmd_options {
        unsigned int packet_copy;
        unsigned int packet_size;
};

struct pmd_internals {
        unsigned int packet_size;
        unsigned int packet_copy;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct rte_ether_addr eth_addr;
        /** Bit mask of RSS offloads; the bit offset also encodes the flow type. */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_null_logtype, \
                "%s(): " fmt "\n", __func__, ##args)

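/*
 * No-copy RX: allocate nb_bufs mbufs from the queue's mempool and return
 * them as received packets. Only the lengths and the port field are set;
 * the payload is left untouched, so this path measures pure mbuf
 * allocation overhead.
 */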
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

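/*
 * Copying RX: like eth_null_rx(), but additionally copy the queue's dummy
 * packet into each mbuf so that touching packet_size bytes of payload is
 * part of the measured cost.
 */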
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

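/* No-copy TX: drop each packet by freeing the mbuf and count it as sent. */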
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

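/*
 * Copying TX: read packet_size bytes from each mbuf into the queue's dummy
 * buffer before freeing it, mirroring the copy cost on the transmit side.
 */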
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

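/*
 * Queue setup: each queue gets a zeroed, packet_size-byte dummy buffer on
 * the device's NUMA node for the copy RX/TX paths; RX queues also record
 * the mempool to allocate mbufs from.
 */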
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

        return 0;
}

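/*
 * Per-queue counters are reported for as many queues as the generic stats
 * API can hold (RTE_ETHDEV_QUEUE_STAT_CNTRS); the port-level totals are the
 * sums of those counters.
 */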
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i, num_stats;
        unsigned long rx_total = 0, tx_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                tx_total += stats->q_opackets[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return -EINVAL;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
                internal->tx_null_queues[i].tx_pkts.cnt = 0;

        return 0;
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

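/*
 * The RSS redirection table is purely software state kept under rss_lock;
 * update/query copy entries in or out wherever the caller's mask bit is set.
 */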
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

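/*
 * RSS hash configuration is likewise software-only: the requested hash
 * functions are masked against the advertised offloads and the 40-byte key
 * is stored verbatim.
 */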
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

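/* Accept any MAC address: the null device has no hardware to program. */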
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct rte_ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

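/*
 * Create the ethdev on the vdev bus: seed the private state (packet size,
 * copy mode, random MAC, default RSS key, full-size RETA) and select the
 * copy or no-copy burst handlers.
 */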
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
        const unsigned int nb_rx_queues = 1;
        const unsigned int nb_tx_queues = 1;
        struct rte_eth_dev_data *data;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
                dev->device.numa_node);

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev)
                return -ENOMEM;

        /* Now put it all together:
         * - store the options and queue state in dev_private,
         * - fill in the ethdev data (queues, link, MAC address),
         * - hook up the ops table and the RX/TX burst functions.
         */
        internals = eth_dev->data->dev_private;
        internals->packet_size = args->packet_size;
        internals->packet_copy = args->packet_copy;
        internals->port_id = eth_dev->data->port_id;
        rte_eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data = eth_dev->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (internals->packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}

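/*
 * kvargs handlers for the numeric "size" and "copy" arguments. On overflow
 * strtoul() returns ULONG_MAX, which truncates to UINT_MAX in the cast, so
 * such values are rejected.
 */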
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

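/*
 * Probe: a secondary process attaches to the port created by the primary
 * and only fills in its local function pointers; the primary parses the
 * devargs and creates the port.
 */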
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        struct pmd_options args = {
                .packet_copy = default_packet_copy,
                .packet_size = default_packet_size,
        };
        struct rte_kvargs *kvlist = NULL;
        struct rte_eth_dev *eth_dev;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        PMD_LOG(INFO, "Initializing pmd_null for %s", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                struct pmd_internals *internals;
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                internals = eth_dev->data->dev_private;
                if (internals->packet_copy) {
                        eth_dev->rx_pkt_burst = eth_null_copy_rx;
                        eth_dev->tx_pkt_burst = eth_null_copy_tx;
                } else {
                        eth_dev->rx_pkt_burst = eth_null_rx;
                        eth_dev->tx_pkt_burst = eth_null_tx;
                }
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_SIZE_ARG,
                                &get_packet_size_arg, &args.packet_size);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_COPY_ARG,
                                &get_packet_copy_arg, &args.packet_copy);
                if (ret < 0)
                        goto free_kvlist;
        }

        PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s", args.packet_size,
                        args.packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, &args);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

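/*
 * Remove: mac_addrs points into dev_private, so clear it before releasing
 * the port to keep rte_eth_dev_release_port() from freeing it separately.
 */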
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* mac_addrs must not be freed alone because it is part of dev_private */
                eth_dev->data->mac_addrs = NULL;

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");
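/*
 * Example usage (EAL command line): create two null ports, the second with
 * 128-byte packets and the copy paths enabled:
 *
 *   --vdev=net_null0 --vdev=net_null1,size=128,copy=1
 */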

RTE_INIT(eth_null_init_log)
{
        eth_null_logtype = rte_log_register("pmd.net.null");
        if (eth_null_logtype >= 0)
                rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}