net: remove dead driver names
[dpdk.git] / drivers / net / null / rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        uint8_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

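/*
 * RX burst for the default (no-copy) mode: allocate mbufs from the queue's
 * mempool and return them with the configured packet size, leaving the
 * payload untouched.
 */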
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

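/*
 * RX burst for copy mode: same as eth_null_rx(), but each mbuf payload is
 * also filled from the queue's pre-allocated dummy packet.
 */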
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

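/* TX burst for the default (no-copy) mode: simply free the mbufs and count them. */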
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

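/*
 * TX burst for copy mode: copy each packet into the queue's dummy buffer
 * before freeing it, so the memcpy cost of a real transmit is emulated.
 */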
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

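/* Aggregate the per-queue counters into the per-port stats structure. */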
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

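/*
 * The RSS callbacks below only emulate RSS state in software: the RETA table
 * and hash key are stored in pmd_internals under rss_lock, but no packets
 * are actually hashed or distributed across queues.
 */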
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

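/*
 * Allocate an ethdev, its (per-process) data and private structures, and
 * wire up the null RX/TX handlers according to the packet_copy setting.
 */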
int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev, so the nulls are local per-process.
         */

        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        /* data is zeroed, so bounding by the destination keeps it NUL-terminated */
        strncpy(data->name, eth_dev->data->name, sizeof(data->name) - 1);

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        eth_dev->driver = NULL;
        data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = "Null PMD";
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        rte_free(data);
        rte_free(internals);

        return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

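/*
 * Virtual device probe: parse the optional "size" and "copy" kvargs and
 * create the null port on the caller's NUMA socket.
 */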
static int
rte_pmd_null_probe(const char *name, const char *params)
{
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");
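
For reference, a minimal usage sketch (not part of the driver): it assumes the application is started with the EAL option --vdev 'net_null0,size=64,copy=1', and the pool name, pool sizes and descriptor counts below are illustrative only.

#include <stdlib.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

int
main(int argc, char **argv)
{
        const uint8_t port_id = 0;      /* the net_null0 vdev */
        struct rte_eth_conf port_conf;
        struct rte_mempool *pool;
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx;

        if (rte_eal_init(argc, argv) < 0)
                rte_exit(EXIT_FAILURE, "EAL initialization failed\n");

        /* illustrative pool parameters */
        pool = rte_pktmbuf_pool_create("null_pool", 8192, 256, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (pool == NULL)
                rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");

        memset(&port_conf, 0, sizeof(port_conf));
        if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0 ||
            rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
                        NULL, pool) < 0 ||
            rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(),
                        NULL) < 0 ||
            rte_eth_dev_start(port_id) < 0)
                rte_exit(EXIT_FAILURE, "cannot set up null port\n");

        /* Each RX burst returns freshly allocated (optionally copied) mbufs
         * of the configured size; each TX burst just frees them. */
        nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
        rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

        return 0;
}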