net/null: internalize create function
dpdk.git: drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        "driver",
        NULL
};

struct pmd_internals;

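/*
 * Per-queue state. Rx and Tx queues share the same structure: each queue
 * keeps a pointer back to the device's private data, the mempool used to
 * allocate mbufs on Rx, a preallocated "dummy" packet buffer used by the
 * copy paths, and atomic packet counters for statistics.
 */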
struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

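/*
 * Per-device private data: the configured synthetic packet size, whether the
 * copy paths are enabled, the owning port id, the Rx/Tx queue arrays, and the
 * software-emulated RSS state (offload mask, RETA table and hash key)
 * protected by a spinlock.
 */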
struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        uint8_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

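/*
 * Rx burst (no-copy variant): allocate up to nb_bufs mbufs from the queue's
 * mempool and hand them to the caller with the configured packet size, but
 * without writing any payload. Stops early if the mempool runs dry.
 */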
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

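/*
 * Rx burst (copy variant): same as eth_null_rx(), but additionally copies the
 * queue's dummy packet into each allocated mbuf so the payload is initialized,
 * and resets nb_segs/next for a single-segment frame.
 */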
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->nb_segs = 1;
                bufs[i]->next = NULL;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

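/*
 * Tx burst (no-copy variant): simply free every mbuf handed in and count it
 * as transmitted.
 */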
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

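/*
 * Tx burst (copy variant): copy the configured packet_size bytes from each
 * mbuf into the queue's dummy buffer before freeing it, so the transmit path
 * actually reads the payload.
 */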
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

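/*
 * Rx queue setup: record the mempool, point the ethdev queue slot at the
 * statically allocated null_queue, and allocate the per-queue dummy packet
 * buffer of the configured packet size on the device's NUMA node. The Tx
 * variant below does the same, minus the mempool.
 */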
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

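/*
 * Aggregate the per-queue atomic counters into the generic ethdev stats.
 * Per-queue entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS and at the
 * number of configured queues.
 */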
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

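/*
 * Software-emulated RSS: the RETA table and hash key live in pmd_internals
 * and are only stored and returned under rss_lock; no hashing is ever
 * performed since the device carries no real traffic.
 */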
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

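/*
 * Create a null ethdev on the given NUMA node. The create function is now
 * internal to the driver and is only reached through the vdev probe path
 * below: it allocates the ethdev data and private structures, seeds the
 * emulated RSS state with a default key, and wires up the copy or no-copy
 * rx/tx burst handlers depending on the "copy" argument.
 */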
static int
eth_dev_null_create(const char *name,
                const unsigned numa_node,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for the eth_dev structure
         * and the internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev, so the null PMD's data is local per-process */

        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
        snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        eth_dev->driver = NULL;
        data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = pmd_null_drv.driver.name;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;

error:
        rte_free(data);
        rte_free(internals);

        return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

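/*
 * vdev probe: parse the optional "size" and "copy" key/value device
 * arguments, falling back to the defaults, and create the null ethdev on the
 * NUMA node of the calling lcore.
 */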
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned numa_node;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        numa_node = rte_socket_id();

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");
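
/*
 * Example (illustrative, not part of the original file): with the driver
 * registered as "net_null", an application such as testpmd can typically
 * instantiate a port with a vdev argument like
 *
 *     --vdev 'net_null0,size=64,copy=1'
 *
 * where "size" is the synthetic packet length in bytes and a non-zero "copy"
 * selects the memcpy-based rx/tx burst handlers.
 */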