ethdev: add return value to stats get dev op
[dpdk.git] / drivers / net / null / rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>	/* strtoul(), used by the devargs handlers below */
#include <limits.h>	/* UINT_MAX, used to reject out-of-range devargs */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

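/*
 * Device arguments accepted on the --vdev command line: "size" sets the
 * length in bytes of the synthetic packets, "copy" (0/1) selects the
 * memcpy-based RX/TX paths instead of the no-touch ones.
 */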
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

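/*
 * Per-queue state: a pointer back to the port's private data, the mempool
 * used for RX mbuf allocation, a scratch "dummy" packet for the copy
 * paths, and atomic packet counters read by the stats callbacks.
 */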
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED, /* a null device cannot autonegotiate */
};

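/*
 * RX burst for the default (no-copy) mode: allocate up to nb_bufs mbufs
 * from the queue's mempool and hand them back untouched, setting only the
 * length and port fields. No packet data is ever written.
 */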
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

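/*
 * RX burst for copy mode: same as eth_null_rx(), but additionally fills
 * each mbuf's data area from the queue's dummy packet, so the buffers
 * carry deterministic (zeroed) payload.
 */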
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

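/*
 * TX burst for the default mode: simply free every mbuf handed in and
 * count it as transmitted. Nothing is read from the packet data.
 */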
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

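/*
 * TX burst for copy mode: copy each packet's first packet_size bytes into
 * the queue's dummy buffer before freeing it, forcing a read of the mbuf
 * data much as a real device's DMA would.
 */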
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

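/*
 * Collect per-queue atomic counters into the generic rte_eth_stats layout.
 * Per-queue slots are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS; totals sum
 * over the reported queues. Returns 0 on success or -EINVAL, matching the
 * int return type the stats_get dev op now carries.
 */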
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}


static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

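/*
 * RSS emulation: the RETA and hash key live in the port's private data
 * and are updated under rss_lock, so the rte_eth_dev RSS calls behave as
 * on real hardware even though no hashing ever happens on the data path.
 */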
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

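/* Dev ops table wired into every null ethdev created by this driver. */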
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

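/*
 * Allocate and initialize one null ethdev: the ethdev data is cloned into
 * a freshly allocated per-process copy, RSS defaults are seeded, and the
 * RX/TX burst functions are chosen according to packet_copy.
 */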
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

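/*
 * rte_kvargs handlers for the "size" and "copy" devargs; both parse an
 * unsigned integer and reject values that overflow strtoul().
 */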
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

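/*
 * Probe entry point: parse the optional "size"/"copy" devargs, falling
 * back to the defaults (64-byte packets, no copy), then create the port.
 */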
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

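/*
 * Remove entry point: look the port up by vdev name, free the per-process
 * data and private area, then release the ethdev port.
 */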
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
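
/*
 * Example usage (a sketch, not part of this file): the driver is attached
 * from the EAL command line via --vdev, e.g.
 *
 *   testpmd -l 0-3 -n 4 --vdev 'net_null0,size=64,copy=1' -- -i
 *
 * where "size" and "copy" map to the devargs registered above, and the
 * legacy name "eth_null0" also works through the alias.
 */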