/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <limits.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

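/*
 * Device arguments accepted by this PMD, e.g.
 *   --vdev=net_null0,size=64,copy=1
 * "size" sets the length of the generated/copied dummy packets and
 * "copy" enables a memcpy on every packet in both directions.
 */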
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

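/*
 * RX bursts: eth_null_rx() hands out freshly allocated mbufs as-is, while
 * eth_null_copy_rx() additionally fills each one from the queue's
 * pre-allocated dummy packet with rte_memcpy().
 */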
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

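/*
 * TX bursts: the device is a sink, so transmit simply frees the mbufs.
 * eth_null_copy_tx() first copies each payload into the dummy packet to
 * model the cost of touching the data.
 */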
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

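/*
 * Trivial lifecycle ops: there is no hardware, so configure is a no-op and
 * start/stop only toggle the soft link status.
 */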
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

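/*
 * Queue setup allocates one zeroed dummy packet buffer per queue on the
 * device's NUMA node; the copy RX/TX paths use it as their data source
 * and sink.
 */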
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

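/*
 * Advertise capabilities: one MAC address, effectively unlimited RX packet
 * length and as many queues as the internal per-port arrays can hold.
 */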
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

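/*
 * Statistics are kept as per-queue atomic packet counters; reads aggregate
 * them into the rte_eth_stats totals, capped at RTE_ETHDEV_QUEUE_STAT_CNTRS
 * per direction.
 */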
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

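/*
 * RSS emulation: RETA and hash configuration are stored verbatim under
 * rss_lock so applications can exercise the RSS API, even though no real
 * hashing takes place on this device.
 */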
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get,
};

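/*
 * Create the ethdev: allocate the port, seed the private data (random MAC,
 * default RSS key and full RETA) and select the copy or no-copy burst
 * functions according to the "copy" devarg.
 */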
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned int packet_size,
		unsigned int packet_copy)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %d",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* Now put it all together:
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data,
	 * - point eth_dev_data to internals,
	 * - point the eth_dev structure to the new eth_dev_data structure.
	 */
	/* NOTE: we replace the data element of the originally allocated
	 * eth_dev, so the null queues are local per-process.
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* Finally assign RX and TX ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

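/*
 * kvargs handlers: parse the numeric "size" and "copy" values from the
 * devargs string.
 */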
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(value, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(value, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

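/*
 * Probe: in a secondary process, attach to the port created by the primary;
 * otherwise parse the devargs and create a new null ethdev.
 */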
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned int packet_size = default_packet_size;
	unsigned int packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

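/*
 * Remove: release the port. mac_addrs points into dev_private, so it is
 * cleared first to keep rte_eth_dev_release_port() from freeing it.
 */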
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}