31cbb84d031d863705b877707f2bc9cf53f23013
[dpdk.git] / drivers / net / null / rte_eth_null.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (C) IGEL Co.,Ltd.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of IGEL Co.,Ltd. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_mbuf.h>
35 #include <rte_ethdev_driver.h>
36 #include <rte_ethdev_vdev.h>
37 #include <rte_malloc.h>
38 #include <rte_memcpy.h>
39 #include <rte_bus_vdev.h>
40 #include <rte_kvargs.h>
41 #include <rte_spinlock.h>
42
/* Devargs recognised by this PMD (e.g. --vdev=net_null0,size=64,copy=1). */
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

/* Defaults used when the corresponding devarg is absent. */
static unsigned default_packet_size = 64;  /* bytes per dummy packet */
static unsigned default_packet_copy;       /* 0: no memcpy on Rx/Tx bursts */

/* NULL-terminated key list handed to rte_kvargs_parse(). */
static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
54
struct pmd_internals;

/* Per-queue state; the same structure serves both Rx and Tx queues. */
struct null_queue {
	struct pmd_internals *internals; /* back-pointer to owning port */

	struct rte_mempool *mb_pool;     /* Rx mbuf pool (unused on Tx) */
	struct rte_mbuf *dummy_packet;   /* packet_size-byte scratch buffer
					  * used by the "copy" burst paths */

	/* Per-queue packet counters, bumped once per burst. */
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

/* Per-port private data, stored in eth_dev->data->dev_private. */
struct pmd_internals {
	unsigned packet_size;  /* length of every dummy packet, in bytes */
	unsigned packet_copy;  /* non-zero: use the memcpy burst functions */
	uint16_t port_id;      /* cached ethdev port id, stamped on Rx mbufs */

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;  /* randomly generated MAC */
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;  /* guards reta_conf, rss_key and rss_hf */

	uint16_t reta_size;  /* number of RETA entries (multiple of group size) */
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};
/* Static link state reported for every null port: fixed 10G full duplex,
 * initially down (eth_dev_start/stop toggle link_status). */
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

/* Dynamic log type, registered in eth_null_init_log() below. */
static int eth_null_logtype;

/* Driver-local logging helper; prepends the calling function's name. */
#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
100
101 static uint16_t
102 eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
103 {
104         int i;
105         struct null_queue *h = q;
106         unsigned packet_size;
107
108         if ((q == NULL) || (bufs == NULL))
109                 return 0;
110
111         packet_size = h->internals->packet_size;
112         if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
113                 return 0;
114
115         for (i = 0; i < nb_bufs; i++) {
116                 bufs[i]->data_len = (uint16_t)packet_size;
117                 bufs[i]->pkt_len = packet_size;
118                 bufs[i]->port = h->internals->port_id;
119         }
120
121         rte_atomic64_add(&(h->rx_pkts), i);
122
123         return i;
124 }
125
126 static uint16_t
127 eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
128 {
129         int i;
130         struct null_queue *h = q;
131         unsigned packet_size;
132
133         if ((q == NULL) || (bufs == NULL))
134                 return 0;
135
136         packet_size = h->internals->packet_size;
137         if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
138                 return 0;
139
140         for (i = 0; i < nb_bufs; i++) {
141                 rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
142                                         packet_size);
143                 bufs[i]->data_len = (uint16_t)packet_size;
144                 bufs[i]->pkt_len = packet_size;
145                 bufs[i]->port = h->internals->port_id;
146         }
147
148         rte_atomic64_add(&(h->rx_pkts), i);
149
150         return i;
151 }
152
153 static uint16_t
154 eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
155 {
156         int i;
157         struct null_queue *h = q;
158
159         if ((q == NULL) || (bufs == NULL))
160                 return 0;
161
162         for (i = 0; i < nb_bufs; i++)
163                 rte_pktmbuf_free(bufs[i]);
164
165         rte_atomic64_add(&(h->tx_pkts), i);
166
167         return i;
168 }
169
170 static uint16_t
171 eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
172 {
173         int i;
174         struct null_queue *h = q;
175         unsigned packet_size;
176
177         if ((q == NULL) || (bufs == NULL))
178                 return 0;
179
180         packet_size = h->internals->packet_size;
181         for (i = 0; i < nb_bufs; i++) {
182                 rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
183                                         packet_size);
184                 rte_pktmbuf_free(bufs[i]);
185         }
186
187         rte_atomic64_add(&(h->tx_pkts), i);
188
189         return i;
190 }
191
/* No per-device configuration is needed for the null PMD. */
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
197
198 static int
199 eth_dev_start(struct rte_eth_dev *dev)
200 {
201         if (dev == NULL)
202                 return -EINVAL;
203
204         dev->data->dev_link.link_status = ETH_LINK_UP;
205         return 0;
206 }
207
208 static void
209 eth_dev_stop(struct rte_eth_dev *dev)
210 {
211         if (dev == NULL)
212                 return;
213
214         dev->data->dev_link.link_status = ETH_LINK_DOWN;
215 }
216
217 static int
218 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
219                 uint16_t nb_rx_desc __rte_unused,
220                 unsigned int socket_id __rte_unused,
221                 const struct rte_eth_rxconf *rx_conf __rte_unused,
222                 struct rte_mempool *mb_pool)
223 {
224         struct rte_mbuf *dummy_packet;
225         struct pmd_internals *internals;
226         unsigned packet_size;
227
228         if ((dev == NULL) || (mb_pool == NULL))
229                 return -EINVAL;
230
231         internals = dev->data->dev_private;
232
233         if (rx_queue_id >= dev->data->nb_rx_queues)
234                 return -ENODEV;
235
236         packet_size = internals->packet_size;
237
238         internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
239         dev->data->rx_queues[rx_queue_id] =
240                 &internals->rx_null_queues[rx_queue_id];
241         dummy_packet = rte_zmalloc_socket(NULL,
242                         packet_size, 0, dev->data->numa_node);
243         if (dummy_packet == NULL)
244                 return -ENOMEM;
245
246         internals->rx_null_queues[rx_queue_id].internals = internals;
247         internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;
248
249         return 0;
250 }
251
252 static int
253 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
254                 uint16_t nb_tx_desc __rte_unused,
255                 unsigned int socket_id __rte_unused,
256                 const struct rte_eth_txconf *tx_conf __rte_unused)
257 {
258         struct rte_mbuf *dummy_packet;
259         struct pmd_internals *internals;
260         unsigned packet_size;
261
262         if (dev == NULL)
263                 return -EINVAL;
264
265         internals = dev->data->dev_private;
266
267         if (tx_queue_id >= dev->data->nb_tx_queues)
268                 return -ENODEV;
269
270         packet_size = internals->packet_size;
271
272         dev->data->tx_queues[tx_queue_id] =
273                 &internals->tx_null_queues[tx_queue_id];
274         dummy_packet = rte_zmalloc_socket(NULL,
275                         packet_size, 0, dev->data->numa_node);
276         if (dummy_packet == NULL)
277                 return -ENOMEM;
278
279         internals->tx_null_queues[tx_queue_id].internals = internals;
280         internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
281
282         return 0;
283 }
284
/* MTU changes are accepted but have no effect on this PMD. */
static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}
290
291 static void
292 eth_dev_info(struct rte_eth_dev *dev,
293                 struct rte_eth_dev_info *dev_info)
294 {
295         struct pmd_internals *internals;
296
297         if ((dev == NULL) || (dev_info == NULL))
298                 return;
299
300         internals = dev->data->dev_private;
301         dev_info->max_mac_addrs = 1;
302         dev_info->max_rx_pktlen = (uint32_t)-1;
303         dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
304         dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
305         dev_info->min_rx_bufsize = 0;
306         dev_info->reta_size = internals->reta_size;
307         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
308 }
309
310 static int
311 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
312 {
313         unsigned i, num_stats;
314         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
315         const struct pmd_internals *internal;
316
317         if ((dev == NULL) || (igb_stats == NULL))
318                 return -EINVAL;
319
320         internal = dev->data->dev_private;
321         num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
322                         RTE_MIN(dev->data->nb_rx_queues,
323                                 RTE_DIM(internal->rx_null_queues)));
324         for (i = 0; i < num_stats; i++) {
325                 igb_stats->q_ipackets[i] =
326                         internal->rx_null_queues[i].rx_pkts.cnt;
327                 rx_total += igb_stats->q_ipackets[i];
328         }
329
330         num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
331                         RTE_MIN(dev->data->nb_tx_queues,
332                                 RTE_DIM(internal->tx_null_queues)));
333         for (i = 0; i < num_stats; i++) {
334                 igb_stats->q_opackets[i] =
335                         internal->tx_null_queues[i].tx_pkts.cnt;
336                 tx_total += igb_stats->q_opackets[i];
337                 tx_err_total += internal->tx_null_queues[i].err_pkts.cnt;
338         }
339
340         igb_stats->ipackets = rx_total;
341         igb_stats->opackets = tx_total;
342         igb_stats->oerrors = tx_err_total;
343
344         return 0;
345 }
346
347 static void
348 eth_stats_reset(struct rte_eth_dev *dev)
349 {
350         unsigned i;
351         struct pmd_internals *internal;
352
353         if (dev == NULL)
354                 return;
355
356         internal = dev->data->dev_private;
357         for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
358                 internal->rx_null_queues[i].rx_pkts.cnt = 0;
359         for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
360                 internal->tx_null_queues[i].tx_pkts.cnt = 0;
361                 internal->tx_null_queues[i].err_pkts.cnt = 0;
362         }
363 }
364
365 static void
366 eth_queue_release(void *q)
367 {
368         struct null_queue *nq;
369
370         if (q == NULL)
371                 return;
372
373         nq = q;
374         rte_free(nq->dummy_packet);
375 }
376
/* Link state is fixed; nothing to poll, always succeed. */
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
380
381 static int
382 eth_rss_reta_update(struct rte_eth_dev *dev,
383                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
384 {
385         int i, j;
386         struct pmd_internals *internal = dev->data->dev_private;
387
388         if (reta_size != internal->reta_size)
389                 return -EINVAL;
390
391         rte_spinlock_lock(&internal->rss_lock);
392
393         /* Copy RETA table */
394         for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
395                 internal->reta_conf[i].mask = reta_conf[i].mask;
396                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
397                         if ((reta_conf[i].mask >> j) & 0x01)
398                                 internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
399         }
400
401         rte_spinlock_unlock(&internal->rss_lock);
402
403         return 0;
404 }
405
406 static int
407 eth_rss_reta_query(struct rte_eth_dev *dev,
408                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
409 {
410         int i, j;
411         struct pmd_internals *internal = dev->data->dev_private;
412
413         if (reta_size != internal->reta_size)
414                 return -EINVAL;
415
416         rte_spinlock_lock(&internal->rss_lock);
417
418         /* Copy RETA table */
419         for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
420                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
421                         if ((reta_conf[i].mask >> j) & 0x01)
422                                 reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
423         }
424
425         rte_spinlock_unlock(&internal->rss_lock);
426
427         return 0;
428 }
429
430 static int
431 eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
432 {
433         struct pmd_internals *internal = dev->data->dev_private;
434
435         rte_spinlock_lock(&internal->rss_lock);
436
437         if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
438                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
439                                 rss_conf->rss_hf & internal->flow_type_rss_offloads;
440
441         if (rss_conf->rss_key)
442                 rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);
443
444         rte_spinlock_unlock(&internal->rss_lock);
445
446         return 0;
447 }
448
449 static int
450 eth_rss_hash_conf_get(struct rte_eth_dev *dev,
451                 struct rte_eth_rss_conf *rss_conf)
452 {
453         struct pmd_internals *internal = dev->data->dev_private;
454
455         rte_spinlock_lock(&internal->rss_lock);
456
457         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
458         if (rss_conf->rss_key)
459                 rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);
460
461         rte_spinlock_unlock(&internal->rss_lock);
462
463         return 0;
464 }
465
/* MAC address changes are accepted but ignored by this PMD. */
static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
472
/* ethdev operations table; eth_queue_release serves both Rx and Tx. */
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};
492
/*
 * Allocate and initialise a null ethdev for the given vdev.
 *
 * @param dev          vdev being probed; its numa_node may be fixed up here.
 * @param packet_size  Length in bytes of each dummy packet.
 * @param packet_copy  Non-zero selects the memcpy-ing Rx/Tx burst functions.
 * @return 0 on success, -ENOMEM if the ethdev cannot be allocated.
 */
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	/* Default 40-byte RSS key, advertised until replaced via
	 * eth_rss_hash_update(). */
	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	/* Pin an unspecified device to the probing lcore's socket. */
	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	/* Advertise every RSS flow type; RETA covers all reta_conf groups. */
	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	/* MAC storage lives inside dev_private; see rte_pmd_null_remove(). */
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
561
/*
 * Kvargs handler for the "size" devarg: parse the value as an unsigned
 * number (any base strtoul accepts) into *extra_args.
 * Returns 0 on success, -EINVAL on NULL input, -1 on an out-of-range value.
 */
static inline int
get_packet_size_arg(const char *key,
		const char *value, void *extra_args)
{
	unsigned int *packet_size = extra_args;

	(void)key;  /* key already matched by rte_kvargs_process() */

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(value, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}
578
/*
 * Kvargs handler for the "copy" devarg: parse the value as an unsigned
 * flag (0 disables copying, non-zero enables it) into *extra_args.
 * Returns 0 on success, -EINVAL on NULL input, -1 on an out-of-range value.
 */
static inline int
get_packet_copy_arg(const char *key,
		const char *value, void *extra_args)
{
	unsigned int *packet_copy = extra_args;

	(void)key;  /* key already matched by rte_kvargs_process() */

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(value, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}
595
/*
 * vdev probe entry point: parse the "size"/"copy" devargs and create a
 * null ethdev.  In a secondary process, attach to the port created by
 * the primary instead.
 *
 * @return 0 on success, -EINVAL on a NULL device, negative on parse or
 *         creation failure.
 */
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	/* Secondary process: look up the primary's port and reuse it. */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	/* Each devarg is honoured only when given exactly once. */
	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}
661
/*
 * vdev remove entry point: find the ethdev created for this vdev and
 * release it.
 *
 * @return 0 on success, -EINVAL on a NULL device, -1 when no matching
 *         ethdev exists.
 */
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
686
/* Virtual-device driver hooks for probe/remove. */
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

/* Register as "net_null", keeping the legacy "eth_null" alias, and
 * document the accepted devargs. */
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

/* Constructor: register the driver's log type, default level NOTICE. */
RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}