test: do not short-circuit null device creation
dpdk.git: drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

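/*
 * Device arguments understood by this PMD (also advertised via
 * RTE_PMD_REGISTER_PARAM_STRING at the end of this file):
 *   size=<int>   length in bytes of the dummy packets delivered on Rx
 *   copy=<int>   when non-zero, packet data is copied on Rx/Tx instead of
 *                only mbuf metadata being updated
 */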
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        "driver",
        NULL
};

struct pmd_internals;

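/*
 * Per-queue state: the mempool used to allocate mbufs on Rx, a scratch
 * buffer holding a dummy packet for the copy mode, and packet counters.
 */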
struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        uint8_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

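/*
 * Rx burst for the non-copy mode: allocate mbufs from the queue's mempool
 * and set their lengths to the configured packet size without writing any
 * payload, so the data area holds whatever the mempool provides.
 */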
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

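/*
 * Rx burst for the copy mode: same as eth_null_rx(), but the queue's
 * dummy packet is copied into each allocated mbuf.
 */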
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

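/*
 * Tx burst for the non-copy mode: simply free every mbuf and count it as
 * transmitted.
 */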
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

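/*
 * Tx burst for the copy mode: copy packet_size bytes from each mbuf into
 * the queue's dummy buffer before freeing it. Note this assumes every
 * transmitted mbuf holds at least packet_size bytes of contiguous data.
 */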
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

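/*
 * Queue setup: each Rx/Tx queue gets a zeroed dummy packet buffer of
 * packet_size bytes, allocated on the device's NUMA node, which the
 * copy-mode burst functions read from or write into.
 */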
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

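/*
 * Software RSS state: the redirection table and hash key are stored in the
 * device private data under rss_lock but are not used to steer traffic;
 * they only let callers exercise the RETA/RSS-hash ethdev API.
 */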
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

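/*
 * Create a null ethdev with one Rx and one Tx queue. This is the entry
 * point exported through rte_eth_null.h (used, for example, by the test
 * application) and is also invoked by the vdev probe callback below.
 */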
int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the nulls are local per-process */

        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        /* copy the ethdev name, guaranteeing NUL termination */
        snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        eth_dev->driver = NULL;
        data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = pmd_null_drv.driver.name;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        rte_free(data);
        rte_free(internals);

        return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

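/*
 * Probe callback for the vdev bus: parse the optional "size" and "copy"
 * device arguments and create the device on the caller's NUMA socket.
 */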
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");
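
/*
 * Typical usage (illustrative only; the exact EAL options depend on the
 * application and platform):
 *
 *   testpmd -l 0-1 -n 4 --vdev=net_null0,size=64,copy=1 -- -i
 *
 * "net_null0" instantiates this driver; "size" and "copy" map to the
 * parameters registered above.
 */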