/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */
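
/*
 * Null PMD: a virtual ethdev whose Tx path drops every packet and whose
 * Rx path hands back freshly allocated mbufs.  With the "copy" devarg the
 * bursts also copy packet data to/from a per-queue dummy buffer, adding a
 * memcpy cost per packet; it is mainly used for testing and benchmarking.
 */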

#include <stdlib.h>     /* strtoul() */
#include <limits.h>     /* UINT_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

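/* Devargs understood by this PMD (see RTE_PMD_REGISTER_PARAM_STRING below). */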
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

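/*
 * Per-queue state, shared between the Rx and Tx paths; dummy_packet is
 * only touched by the "copy" burst functions.
 */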
struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
};

struct pmd_internals {
        unsigned int packet_size;
        unsigned int packet_copy;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct rte_ether_addr eth_addr;
        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_null_logtype, \
                "%s(): " fmt "\n", __func__, ##args)

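/*
 * No-copy Rx burst: bulk-allocate mbufs from the queue's pool and return
 * them with only the lengths and input port filled in; the payload is
 * left as-is.
 */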
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

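/*
 * Copy-mode Rx burst: same as eth_null_rx(), but each mbuf's payload is
 * filled from the queue's dummy packet.
 */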
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

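/* No-copy Tx burst: drop every packet by freeing the mbufs. */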
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

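/*
 * Copy-mode Tx burst: read each packet's payload into the dummy buffer
 * before freeing it, so the transmit side pays a memcpy per packet.
 */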
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

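/*
 * Rx/Tx queue setup: the queues live inside dev_private, so setup only
 * wires up the pointers and allocates the per-queue dummy packet of
 * packet_size bytes on the device's NUMA node.
 */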
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

        return 0;
}

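/* Aggregate the per-queue soft counters into the ethdev statistics. */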
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned int i, num_stats;
        unsigned long rx_total = 0, tx_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

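/*
 * Software RSS emulation: the RETA and hash key are only stored (under
 * rss_lock) so that later queries return what was last configured.
 */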
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key,
                                sizeof(internal->rss_key));

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key,
                                sizeof(internal->rss_key));

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct rte_ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

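/*
 * Allocate the ethdev and its private data, apply defaults (one Rx/Tx
 * queue, random MAC, default RSS key) and select the burst functions
 * according to the copy mode.
 */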
static int
eth_dev_null_create(struct rte_vdev_device *dev,
                unsigned int packet_size,
                unsigned int packet_copy)
{
        const unsigned int nb_rx_queues = 1;
        const unsigned int nb_tx_queues = 1;
        struct rte_eth_dev_data *data;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        PMD_LOG(INFO, "Creating null ethdev on numa socket %d",
                dev->device.numa_node);

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev)
                return -ENOMEM;

        /* Now put it all together:
         * - store queue data in internals,
         * - store numa_node info in ethdev data,
         * - point eth_dev_data to internals,
         * - and point the eth_dev structure to the new eth_dev_data structure.
         */
        internals = eth_dev->data->dev_private;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;
        rte_eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key,
                        sizeof(internals->rss_key));

        data = eth_dev->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}

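/* rte_kvargs_process() handlers for the "size" and "copy" devargs. */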
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

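/*
 * Probe a null vdev instance, e.g. one created with the EAL option
 * --vdev=net_null0,size=64,copy=1
 */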
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned int packet_size = default_packet_size;
        unsigned int packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        struct rte_eth_dev *eth_dev;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        PMD_LOG(INFO, "Initializing pmd_null for %s", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* mac_addrs must not be freed alone because it is part of dev_private */
                eth_dev->data->mac_addrs = NULL;

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

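/* Register the driver, its legacy "eth_null" alias and its devargs. */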
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");

RTE_INIT(eth_null_init_log)
{
        eth_null_logtype = rte_log_register("pmd.net.null");
        if (eth_null_logtype >= 0)
                rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}