drivers: copy fake PCI device info to ethdev data
[dpdk.git] / drivers / net / null / rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};
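
/*
 * Both device arguments are optional: "size" sets the length of the
 * packets produced/consumed (default 64) and "copy" selects the rx/tx
 * paths that actually touch packet data (default off). A null port is
 * typically created from the EAL command line, for example (arguments
 * shown are illustrative):
 *
 *   --vdev 'eth_null0,size=256,copy=1'
 */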

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        unsigned numa_node;

        unsigned nb_rx_queues;
        unsigned nb_tx_queues;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = 0
};

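/*
 * RX burst for the default (no-copy) mode: allocate nb_bufs mbufs from the
 * queue's mempool and hand them back as received packets. The payload is
 * whatever the mempool returns; only the length fields are set.
 */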
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

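/*
 * RX burst for copy mode: same as eth_null_rx, but additionally fills each
 * mbuf from the queue's pre-allocated dummy packet with rte_memcpy().
 */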
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

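/*
 * TX burst for the default mode: drop every packet by freeing the mbufs
 * back to their mempools, counting them as transmitted.
 */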
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

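/*
 * TX burst for copy mode: read each packet by copying it into the queue's
 * dummy packet buffer before freeing it, so packet data is actually
 * touched on the transmit side as well.
 */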
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internals *internals;

        internals = dev->data->dev_private;
        internals->nb_rx_queues = dev->data->nb_rx_queues;
        internals->nb_tx_queues = dev->data->nb_tx_queues;

        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = 0;
}

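/*
 * Queue setup stores the queue in ethdev data and pre-allocates the dummy
 * packet buffer used by the copy rx/tx paths. The descriptor count and
 * per-queue configuration are ignored; there is no real hardware ring.
 */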
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= internals->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= internals->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

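/* Report device capabilities; a null device imposes essentially no limits. */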
static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

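/*
 * Aggregate the per-queue atomic counters. Only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported individually.
 */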
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(internal->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(internal->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

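/*
 * The RSS ops below do not influence packet distribution (there is only a
 * software source/sink); they simply store and return the redirection
 * table and hash configuration so applications can exercise the RSS API.
 * All accesses are serialized with rss_lock.
 */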
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

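/*
 * Create a null ethdev. This is exported via rte_eth_null.h, so a port can
 * also be created programmatically, e.g. (illustrative call):
 *
 *   eth_dev_null_create("eth_null0", rte_socket_id(), 64, 0);
 */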
int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the nulls are local per-process */

        internals->nb_rx_queues = nb_rx_queues;
        internals->nb_tx_queues = nb_tx_queues;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->numa_node = numa_node;

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        /* data is zeroed by rte_zmalloc_socket(), so the copied name
         * stays NUL-terminated */
        strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        TAILQ_INIT(&eth_dev->link_intr_cbs);

        /* this is a virtual device: there is no PCI device behind it, so
         * copy the fake device information into ethdev data instead
         */
        eth_dev->driver = NULL;
        eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        eth_dev->data->kdrv = RTE_KDRV_NONE;
        eth_dev->data->drv_name = drivername;
        eth_dev->data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        rte_free(data);
        rte_free(internals);

        return -1;
}

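/*
 * kvargs handlers: each is invoked by rte_kvargs_process() with the value
 * string of its key and a pointer to the variable to fill in.
 */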
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

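/*
 * Entry point for "--vdev eth_null<n>[,size=<n>][,copy=<0|1>]": parse the
 * device arguments and create the port on the caller's NUMA socket.
 */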
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

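/*
 * Tear down a port created by rte_pmd_null_devinit(), freeing the
 * per-device allocations and releasing the ethdev entry.
 */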
static int
rte_pmd_null_devuninit(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_driver pmd_null_drv = {
        .name = "eth_null",
        .type = PMD_VDEV,
        .init = rte_pmd_null_devinit,
        .uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);