net/null: release port upon close
drivers/net/null/rte_eth_null.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

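/*
 * Device arguments accepted on the --vdev command line (see the
 * RTE_PMD_REGISTER_PARAM_STRING declaration at the bottom of this file):
 * "size" sets the length of the packets faked on Rx, "copy" enables
 * touching the packet payload on Rx/Tx, and "no-rx" makes the Rx burst
 * return no packets at all.
 */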
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"
#define ETH_NULL_PACKET_NO_RX_ARG       "no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        ETH_NULL_PACKET_NO_RX_ARG,
        NULL
};

struct pmd_internals;

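/*
 * Per-queue state. Rx and Tx queues share this layout: the mempool is
 * only used on the Rx side, the dummy packet buffer only by the "copy"
 * burst functions, and the packet counters feed eth_stats_get().
 */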
struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
};

struct pmd_options {
        unsigned int packet_copy;
        unsigned int packet_size;
        unsigned int no_rx;
};

struct pmd_internals {
        unsigned int packet_size;
        unsigned int packet_copy;
        unsigned int no_rx;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct rte_ether_addr eth_addr;
        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_null_logtype, \
                "%s(): " fmt "\n", __func__, ##args)

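/*
 * Rx burst used in the default mode: allocate nb_bufs mbufs from the
 * queue's mempool and hand them straight back as "received" packets of
 * the configured size, without ever writing the payload.
 */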
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

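/*
 * Rx burst used when "copy" is enabled: same as eth_null_rx(), but the
 * zeroed dummy packet is copied into every mbuf, so the payload memory
 * is actually written and the memcpy cost shows up in measurements.
 */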
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
                uint16_t nb_bufs __rte_unused)
{
        return 0;
}

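/*
 * Tx burst used in the default mode: drop everything. Freeing the mbufs
 * back to their pool is all a null transmit has to do.
 */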
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

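/*
 * Tx burst used when "copy" is enabled: read each packet's payload into
 * the dummy buffer before dropping it, so transmitted data is actually
 * loaded from the mbufs.
 */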
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned int packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

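/*
 * Queue setup: both directions point the ethdev queue at the matching
 * null_queue slot in dev_private and allocate a zeroed dummy packet of
 * the configured size; only Rx additionally records the mempool to
 * allocate mbufs from.
 */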
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned int packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

        return 0;
}

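/*
 * Stats: the per-queue packet counters are copied into
 * q_ipackets/q_opackets and summed into the port totals; only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported per direction.
 */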
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned int i, num_stats;
        unsigned long rx_total = 0, tx_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return -EINVAL;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
                internal->tx_null_queues[i].tx_pkts.cnt = 0;

        return 0;
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

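/*
 * RSS emulation: the redirection table and hash key are kept in
 * dev_private and protected by rss_lock, so applications can exercise
 * the RETA and RSS hash APIs without real hardware behind them.
 */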
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct rte_ether_addr *addr)
{
        return 0;
}

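/*
 * dev_close releases what probe allocated. Because the port is created
 * with RTE_ETH_DEV_CLOSE_REMOVE set, rte_eth_dev_close() also frees the
 * port itself; mac_addrs lives inside dev_private, so it is only
 * unlinked here, never freed on its own.
 */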
static int
eth_dev_close(struct rte_eth_dev *dev)
{
        PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
                        rte_socket_id());

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* mac_addrs must not be freed alone because it is part of dev_private */
        dev->data->mac_addrs = NULL;

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_close = eth_dev_close,
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

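/*
 * Build one null ethdev: allocate it through the vdev helper, fill in
 * dev_private from the parsed options, publish a single Rx and Tx queue
 * and select the burst functions matching the copy/no-rx mode.
 */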
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
        const unsigned int nb_rx_queues = 1;
        const unsigned int nb_tx_queues = 1;
        struct rte_eth_dev_data *data;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
                dev->device.numa_node);

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev)
                return -ENOMEM;

        /* Now put it all together:
         * - store queue data in internals,
         * - store numa_node info in ethdev data,
         * - point eth_dev_data to internals,
         * - and point the eth_dev structure to the new eth_dev_data structure.
         * NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the nulls are local per-process.
         */
        internals = eth_dev->data->dev_private;
        internals->packet_size = args->packet_size;
        internals->packet_copy = args->packet_copy;
        internals->no_rx = args->no_rx;
        internals->port_id = eth_dev->data->port_id;
        rte_eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data = eth_dev->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;
        data->promiscuous = 1;
        data->all_multicast = 1;
        data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (internals->packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else if (internals->no_rx) {
                eth_dev->rx_pkt_burst = eth_null_no_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}

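/*
 * rte_kvargs_process() callbacks: each one parses a single devargs value
 * into the pmd_options field passed through extra_args.
 */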
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned int)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned int no_rx;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        no_rx = (unsigned int)strtoul(a, NULL, 0);
        if (no_rx != 0 && no_rx != 1)
                return -1;

        *(unsigned int *)extra_args = no_rx;
        return 0;
}

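/*
 * Probe entry point. A typical invocation, using the devargs registered
 * at the bottom of this file, would be for example:
 *
 *   testpmd --vdev=net_null0,size=64,copy=1
 *
 * In a secondary process the port data already exists, so only the local
 * function pointers are wired up.
 */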
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        struct pmd_options args = {
                .packet_copy = default_packet_copy,
                .packet_size = default_packet_size,
                .no_rx = default_no_rx,
        };
        struct rte_kvargs *kvlist = NULL;
        struct rte_eth_dev *eth_dev;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        PMD_LOG(INFO, "Initializing pmd_null for %s", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                struct pmd_internals *internals;
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                internals = eth_dev->data->dev_private;
                if (internals->packet_copy) {
                        eth_dev->rx_pkt_burst = eth_null_copy_rx;
                        eth_dev->tx_pkt_burst = eth_null_copy_tx;
                } else if (internals->no_rx) {
                        eth_dev->rx_pkt_burst = eth_null_no_rx;
                        eth_dev->tx_pkt_burst = eth_null_tx;
                } else {
                        eth_dev->rx_pkt_burst = eth_null_rx;
                        eth_dev->tx_pkt_burst = eth_null_tx;
                }
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_SIZE_ARG,
                                &get_packet_size_arg, &args.packet_size);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_COPY_ARG,
                                &get_packet_copy_arg, &args.packet_copy);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                ETH_NULL_PACKET_NO_RX_ARG,
                                &get_packet_no_rx_arg, &args.no_rx);
                if (ret < 0)
                        goto free_kvlist;

                if (args.no_rx && args.packet_copy) {
                        PMD_LOG(ERR,
                                "Both %s and %s arguments at the same time not supported",
                                ETH_NULL_PACKET_COPY_ARG,
                                ETH_NULL_PACKET_NO_RX_ARG);
                        ret = -EINVAL;
                        goto free_kvlist;
                }
        }

        PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s", args.packet_size,
                        args.packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, &args);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

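/*
 * Hot-unplug path: look the port up by name, close it and release it.
 * A port that is already gone (e.g. released by rte_eth_dev_close())
 * counts as success.
 */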
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return 0; /* port already released */

        eth_dev_close(eth_dev);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int> "
        ETH_NULL_PACKET_NO_RX_ARG "=0|1");