drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* libc headers for snprintf(), strtoul() and UINT_MAX used below */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        /* Per-queue counters, updated from the RX/TX burst functions. */
        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = 0
};

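/*
 * RX burst for the no-copy mode: mbufs are allocated from the queue's
 * mempool and handed back with only their lengths set; the payload is
 * whatever the buffer already contained.
 */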
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

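/*
 * RX burst for the copy mode: same as eth_null_rx() except that the
 * queue's preallocated dummy packet (zeroed at queue setup) is copied
 * into each mbuf, at the cost of one memcpy per packet.
 */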
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

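/*
 * TX burst for the no-copy mode: packets are simply freed (dropped)
 * and counted as transmitted.
 */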
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

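/*
 * TX burst for the copy mode: each packet's payload is copied into the
 * queue's dummy buffer before the mbuf is freed, so the per-packet
 * memcpy cost is paid on transmit as well.
 */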
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

/* Device setup is trivial: start/stop only toggle the link status. */
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = 0;
}

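/*
 * Queue setup: every queue, RX or TX, gets a zeroed dummy packet buffer
 * of packet_size bytes; it backs the copy-mode burst functions above.
 */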
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

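/*
 * Statistics are gathered from the per-queue counters; only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported individually.
 */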
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

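/*
 * RSS emulation: the PMD keeps a software copy of the redirection table
 * and hash key in pmd_internals, so applications can exercise the RSS
 * configuration API against a device with no hardware behind it.
 */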
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key,
                                sizeof(internal->rss_key));

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key,
                                sizeof(internal->rss_key));

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

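/*
 * Create a null ethdev. Besides being called from the vdev init path
 * below, this function is exported through rte_eth_null.h so a port can
 * be created programmatically, e.g. (port name here is illustrative):
 *
 *     eth_dev_null_create("eth_null_test", rte_socket_id(), 64, 0);
 */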
int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for the eth_dev structure and the
         * internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we replace the data element of the originally allocated
         * eth_dev, so the nulls are local per-process */

        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key,
                        sizeof(internals->rss_key));

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        TAILQ_INIT(&eth_dev->link_intr_cbs);

        eth_dev->driver = NULL;
        data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = drivername;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        rte_free(data);
        rte_free(internals);

        return -1;
}

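/*
 * kvargs handlers: both devargs are parsed with strtoul() in base 0, so
 * decimal, hex ("0x...") and octal values are all accepted.
 */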
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

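/*
 * vdev init entry point. A port is typically requested on the EAL command
 * line, for example (vdev name here is illustrative):
 *
 *     --vdev=eth_null0,size=64,copy=1
 *
 * "size" sets the dummy packet size and "copy" enables the memcpy-based
 * burst functions; both arguments are optional.
 */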
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_devuninit(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

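/*
 * Register the driver as a virtual device so EAL can match "eth_null"
 * vdev arguments against it at startup.
 */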
static struct rte_driver pmd_null_drv = {
        .name = "eth_null",
        .type = PMD_VDEV,
        .init = rte_pmd_null_devinit,
        .uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);