ethdev: add device flag to bypass auto-filled queue xstats
drivers/net/null/rte_eth_null.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

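/*
 * Rx burst for the default mode: allocate nb_bufs mbufs from the queue's
 * mempool and hand them to the caller as packets of the configured size.
 * The payload is left untouched, which keeps this path as cheap as possible.
 */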
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

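/*
 * Rx burst for copy mode ("copy=1"): like eth_null_rx(), but also copies the
 * queue's dummy packet into each mbuf so the payload bytes are actually
 * written, exercising the memory subsystem on the receive side.
 */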
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

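/* Rx burst for "no-rx=1" mode: never returns any packets. */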
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

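/*
 * Tx burst for the default mode: free every mbuf handed in and count it as
 * transmitted.
 */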
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

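/*
 * Tx burst for copy mode: read each packet back into the queue's dummy
 * buffer before freeing it, so the payload bytes are actually touched.
 */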
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

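/*
 * Fill the per-queue and total packet counters from the atomic counters
 * maintained by the burst functions. Byte counters are not maintained,
 * since no real traffic ever flows through this device.
 */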
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

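/*
 * RSS redirection table (RETA) update/query: the table only lives in
 * dev_private under rss_lock; the null device performs no actual hashing.
 */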
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed alone, because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

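/*
 * Allocate the ethdev, fill dev_private from the parsed options, and wire
 * up the ops and burst functions. A single Rx and Tx queue is configured;
 * default_rss_key appears to match the widely used default Toeplitz key.
 */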
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on NUMA socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* Now put it all together:
	 * - store the parsed options and queue state in internals,
	 * - fill the ethdev data (queue counts, link, MAC address),
	 * - and hook up the device ops and Rx/Tx burst functions.
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
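	/*
	 * With this flag set, the ethdev layer auto-fills the per-queue
	 * basic xstats from the q_ipackets/q_opackets counters reported by
	 * eth_stats_get(), so this driver needs no xstats callbacks of its
	 * own.
	 */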
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

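/*
 * kvargs callbacks for the "size", "copy" and "no-rx" devargs; each one
 * parses a single value into the pmd_options field passed via extra_args.
 */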
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

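/*
 * Probe entry point: parse the devargs into pmd_options and create the
 * device. In a secondary process the device data already exists, so only
 * the ops and burst functions are attached locally.
 */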
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Arguments %s and %s cannot be used at the same time",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

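/* Remove entry point: close and release the port created at probe time. */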
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

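/*
 * Example usage (a hypothetical command line; adjust the EAL options for
 * your setup), exercising the devargs registered below:
 *
 *   dpdk-testpmd --vdev=net_null0,size=128,copy=1 -- -i
 *
 * This creates one null port producing 128-byte packets with payload
 * copies enabled on both the Rx and Tx paths.
 */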
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");