drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>     /* strtoul() */
#include <limits.h>     /* UINT_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct ether_addr eth_addr;
        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_AUTONEG,
};

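/*
 * RX in no-copy mode: allocate nb_bufs mbufs from the queue's mempool and
 * report them as received without touching the payload.
 */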
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

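/*
 * RX in copy mode: same as eth_null_rx(), but additionally copy the queue's
 * dummy packet into each mbuf's data area.
 */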
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

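/* TX in no-copy mode: drop every packet by freeing the mbufs immediately. */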
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

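/*
 * TX in copy mode: copy each packet's payload into the queue's dummy buffer
 * before freeing it, so the payload is actually read once per packet.
 */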
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

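/*
 * RX queue setup: record the mempool to allocate from and pre-allocate the
 * per-queue dummy packet used by the copy-mode RX path.
 */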
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

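/* TX queue setup: pre-allocate the dummy buffer used by the copy-mode TX path. */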
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

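/*
 * Aggregate the per-queue atomic counters into rte_eth_stats; per-queue
 * entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */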
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

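/*
 * RSS redirection table updates and queries are emulated in software: the
 * table lives in pmd_internals and is protected by rss_lock.
 */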
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

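/*
 * RSS hash configuration: only hash types advertised in
 * flow_type_rss_offloads are accepted, and the 40-byte key is stored
 * verbatim when one is supplied.
 */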
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key,
                                sizeof(internal->rss_key));

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key,
                                sizeof(internal->rss_key));

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

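/* Accept any MAC address; the null device has no hardware to program. */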
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

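/*
 * Create one null ethdev with a single RX and a single TX queue, a random
 * MAC address and software-emulated RSS state.
 */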
static int
eth_dev_null_create(struct rte_vdev_device *dev,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                dev->device.numa_node);

        /* now do all data allocation - for the eth_dev structure
         * and internal (private) data
         */
        data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
                dev->device.numa_node);
        if (!data)
                return -ENOMEM;

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev) {
                rte_free(data);
                return -ENOMEM;
        }
        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we replace the data element of the originally allocated
         * eth_dev, so the null device's data is local per-process
         */

        internals = eth_dev->data->dev_private;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;
        eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key,
                        sizeof(internals->rss_key));

        rte_memcpy(data, eth_dev->data, sizeof(*data));
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;
}

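/*
 * kvargs handlers: parse the "size" and "copy" device arguments as unsigned
 * integers.
 */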
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

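/*
 * Probe: parse the optional "size"/"copy" device arguments, falling back to
 * the defaults (64-byte packets, no copy), then create the ethdev.
 */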
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

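/*
 * Remove: look up the ethdev by vdev name, free its private data and
 * release the port.
 */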
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");
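
/*
 * Usage sketch (assumes a standard DPDK build; the instance name "net_null0"
 * and the testpmd invocation are illustrative, not part of this file):
 *
 *   testpmd -l 0-1 --vdev=net_null0,size=128,copy=1 -- -i
 *
 * This instantiates one null port that "receives" 128-byte packets and
 * touches every payload on both RX and TX (copy=1).
 */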