net/pcap: use a struct to pass user options
drivers/net/pcap/rte_eth_pcap.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 * All rights reserved.
 */

#include <time.h>

#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <unistd.h>

#if defined(RTE_EXEC_ENV_FREEBSD)
#include <sys/sysctl.h>
#include <net/if_dl.h>
#endif

#include <pcap.h>

#include <rte_cycles.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1

#define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG    "iface"
#define ETH_PCAP_PHY_MAC_ARG  "phy_mac"

#define ETH_PCAP_ARG_MAXLEN     64

#define RTE_PMD_PCAP_MAX_QUEUES 16

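/*
 * Globals shared by every pcap port in the process: the libpcap error
 * buffer, a bounce buffer used to linearize multi-segment mbufs on Tx,
 * the wall-clock/TSC reference pair sampled at probe time that
 * calculate_timestamp() uses to synthesize packet timestamps, and the
 * counter used to derive each port's MAC address.
 */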
static char errbuf[PCAP_ERRBUF_SIZE];
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
static uint8_t iface_idx;

struct queue_stat {
        volatile unsigned long pkts;
        volatile unsigned long bytes;
        volatile unsigned long err_pkts;
};

struct pcap_rx_queue {
        uint16_t port_id;
        uint16_t queue_id;
        struct rte_mempool *mb_pool;
        struct queue_stat rx_stat;
        char name[PATH_MAX];
        char type[ETH_PCAP_ARG_MAXLEN];
};

struct pcap_tx_queue {
        uint16_t port_id;
        uint16_t queue_id;
        struct queue_stat tx_stat;
        char name[PATH_MAX];
        char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
        struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
        struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
        char devargs[ETH_PCAP_ARG_MAXLEN];
        struct rte_ether_addr eth_addr;
        int if_index;
        int single_iface;
        int phy_mac;
};

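/*
 * pcap handles are only valid within the process that opened them, so
 * they live in the per-process private area rather than in shared
 * device data.
 */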
struct pmd_process_private {
        pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
        pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
        pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
};

struct pmd_devargs {
        unsigned int num_of_queue;
        struct devargs_queue {
                pcap_dumper_t *dumper;
                pcap_t *pcap;
                const char *name;
                const char *type;
        } queue[RTE_PMD_PCAP_MAX_QUEUES];
        int phy_mac;
};

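/*
 * All user options parsed from the devargs, grouped in a single struct
 * so they can be passed to the helper functions as one unit.
 */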
struct pmd_devargs_all {
        struct pmd_devargs rx_queues;
        struct pmd_devargs tx_queues;
        int single_iface;
        unsigned int is_tx_pcap;
        unsigned int is_tx_iface;
};

static const char *valid_arguments[] = {
        ETH_PCAP_RX_PCAP_ARG,
        ETH_PCAP_TX_PCAP_ARG,
        ETH_PCAP_RX_IFACE_ARG,
        ETH_PCAP_RX_IFACE_IN_ARG,
        ETH_PCAP_TX_IFACE_ARG,
        ETH_PCAP_IFACE_ARG,
        ETH_PCAP_PHY_MAC_ARG,
        NULL
};

static struct rte_eth_link pmd_link = {
                .link_speed = ETH_SPEED_NUM_10G,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN,
                .link_autoneg = ETH_LINK_FIXED,
};

static int eth_pcap_logtype;

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
                "%s(): " fmt "\n", __func__, ##args)

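/*
 * Copy a received packet that is larger than the mbuf data room into a
 * chain of mbufs. Returns the resulting number of segments, or -1 if a
 * segment mbuf cannot be allocated.
 */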
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
                const u_char *data, uint16_t data_len)
{
        /* Copy the first segment. */
        uint16_t len = rte_pktmbuf_tailroom(mbuf);
        struct rte_mbuf *m = mbuf;

        rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
        data_len -= len;
        data += len;

        while (data_len > 0) {
                /* Allocate next mbuf and point to that. */
                m->next = rte_pktmbuf_alloc(mb_pool);

                if (unlikely(!m->next))
                        return -1;

                m = m->next;

                /* Headroom is not needed in chained mbufs. */
                rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
                m->pkt_len = 0;
                m->data_len = 0;

                /* Copy next segment. */
                len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
                rte_memcpy(rte_pktmbuf_append(m, len), data, len);

                mbuf->nb_segs++;
                data_len -= len;
                data += len;
        }

        return mbuf->nb_segs;
}

/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
static void
eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
{
        uint16_t data_len = 0;

        while (mbuf) {
                rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
                        mbuf->data_len);

                data_len += mbuf->data_len;
                mbuf = mbuf->next;
        }
}

static uint16_t
eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        unsigned int i;
        struct pcap_pkthdr header;
        struct pmd_process_private *pp;
        const u_char *packet;
        struct rte_mbuf *mbuf;
        struct pcap_rx_queue *pcap_q = queue;
        uint16_t num_rx = 0;
        uint16_t buf_size;
        uint32_t rx_bytes = 0;
        pcap_t *pcap;

        pp = rte_eth_devices[pcap_q->port_id].process_private;
        pcap = pp->rx_pcap[pcap_q->queue_id];

        if (unlikely(pcap == NULL || nb_pkts == 0))
                return 0;

        /* Reads the given number of packets from the pcap file one by one
         * and copies the packet data into a newly allocated mbuf to return.
         */
        for (i = 0; i < nb_pkts; i++) {
                /* Get the next PCAP packet */
                packet = pcap_next(pcap, &header);
                if (unlikely(packet == NULL))
                        break;

                mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
                if (unlikely(mbuf == NULL))
                        break;

                /* Now get the space available for data in the mbuf */
                buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
                                RTE_PKTMBUF_HEADROOM;

                if (header.caplen <= buf_size) {
                        /* pcap packet will fit in the mbuf, can copy it */
                        rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
                                        header.caplen);
                        mbuf->data_len = (uint16_t)header.caplen;
                } else {
                        /* Try to read the jumbo frame into multiple mbufs. */
                        if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
                                                       mbuf,
                                                       packet,
                                                       header.caplen) == -1)) {
                                rte_pktmbuf_free(mbuf);
                                break;
                        }
                }

                mbuf->pkt_len = (uint16_t)header.caplen;
                mbuf->port = pcap_q->port_id;
                bufs[num_rx] = mbuf;
                num_rx++;
                rx_bytes += header.caplen;
        }
        pcap_q->rx_stat.pkts += num_rx;
        pcap_q->rx_stat.bytes += rx_bytes;

        return num_rx;
}

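/*
 * Derive a pcap timestamp from the TSC: the cycles elapsed since probe
 * are converted to a timeval and added to the wall-clock time recorded
 * when the device was probed.
 */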
static inline void
calculate_timestamp(struct timeval *ts) {
        uint64_t cycles;
        struct timeval cur_time;

        cycles = rte_get_timer_cycles() - start_cycles;
        cur_time.tv_sec = cycles / hz;
        cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
        timeradd(&start_time, &cur_time, ts);
}

/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        unsigned int i;
        struct rte_mbuf *mbuf;
        struct pmd_process_private *pp;
        struct pcap_tx_queue *dumper_q = queue;
        uint16_t num_tx = 0;
        uint32_t tx_bytes = 0;
        struct pcap_pkthdr header;
        pcap_dumper_t *dumper;

        pp = rte_eth_devices[dumper_q->port_id].process_private;
        dumper = pp->tx_dumper[dumper_q->queue_id];

        if (dumper == NULL || nb_pkts == 0)
                return 0;

        /* Write the nb_pkts packets to the previously opened pcap file
         * dumper.
         */
        for (i = 0; i < nb_pkts; i++) {
                mbuf = bufs[i];
                calculate_timestamp(&header.ts);
                header.len = mbuf->pkt_len;
                header.caplen = header.len;

                if (likely(mbuf->nb_segs == 1)) {
                        pcap_dump((u_char *)dumper, &header,
                                  rte_pktmbuf_mtod(mbuf, void*));
                } else {
                        if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
                                eth_pcap_gather_data(tx_pcap_data, mbuf);
                                pcap_dump((u_char *)dumper, &header,
                                          tx_pcap_data);
                        } else {
                                PMD_LOG(ERR,
                                        "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
                                        mbuf->pkt_len,
                                        RTE_ETHER_MAX_JUMBO_FRAME_LEN);

                                rte_pktmbuf_free(mbuf);
                                break;
                        }
                }

                num_tx++;
                tx_bytes += mbuf->pkt_len;
                rte_pktmbuf_free(mbuf);
        }

        /*
         * Since there's no place to hook a callback when the forwarding
         * process stops and to make sure the pcap file is actually written,
         * we flush the pcap dumper within each burst.
         */
        pcap_dump_flush(dumper);
        dumper_q->tx_stat.pkts += num_tx;
        dumper_q->tx_stat.bytes += tx_bytes;
        dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;

        return num_tx;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        unsigned int i;
        int ret;
        struct rte_mbuf *mbuf;
        struct pmd_process_private *pp;
        struct pcap_tx_queue *tx_queue = queue;
        uint16_t num_tx = 0;
        uint32_t tx_bytes = 0;
        pcap_t *pcap;

        pp = rte_eth_devices[tx_queue->port_id].process_private;
        pcap = pp->tx_pcap[tx_queue->queue_id];

        if (unlikely(nb_pkts == 0 || pcap == NULL))
                return 0;

        for (i = 0; i < nb_pkts; i++) {
                mbuf = bufs[i];

                if (likely(mbuf->nb_segs == 1)) {
                        ret = pcap_sendpacket(pcap,
                                        rte_pktmbuf_mtod(mbuf, u_char *),
                                        mbuf->pkt_len);
                } else {
                        if (mbuf->pkt_len <= RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
                                eth_pcap_gather_data(tx_pcap_data, mbuf);
                                ret = pcap_sendpacket(pcap,
                                                tx_pcap_data, mbuf->pkt_len);
                        } else {
                                PMD_LOG(ERR,
                                        "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
                                        mbuf->pkt_len,
                                        RTE_ETHER_MAX_JUMBO_FRAME_LEN);

                                rte_pktmbuf_free(mbuf);
                                break;
                        }
                }

                if (unlikely(ret != 0))
                        break;
                num_tx++;
                tx_bytes += mbuf->pkt_len;
                rte_pktmbuf_free(mbuf);
        }

        tx_queue->tx_stat.pkts += num_tx;
        tx_queue->tx_stat.bytes += tx_bytes;
        tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;

        return num_tx;
}

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
        *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
                        RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

        if (*pcap == NULL) {
                PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
                return -1;
        }

        return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
        if (open_iface_live(iface, pcap) < 0) {
                PMD_LOG(ERR, "Couldn't open interface %s", iface);
                return -1;
        }

        return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
        pcap_t *tx_pcap;

        /*
         * We need a dummy empty pcap_t to use with pcap_dump_open().
         * Create an Ethernet pcap handle with a snapshot length large
         * enough to hold any packet we may dump.
         */
        tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
        if (tx_pcap == NULL) {
                PMD_LOG(ERR, "Couldn't create dead pcap");
                return -1;
        }

        /* The dumper is created using the previous pcap_t reference */
        *dumper = pcap_dump_open(tx_pcap, pcap_filename);
        if (*dumper == NULL) {
                pcap_close(tx_pcap);
                PMD_LOG(ERR, "Couldn't open %s for writing.",
                        pcap_filename);
                return -1;
        }

        pcap_close(tx_pcap);
        return 0;
}

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
        *pcap = pcap_open_offline(pcap_filename, errbuf);
        if (*pcap == NULL) {
                PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
                        errbuf);
                return -1;
        }

        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internals = dev->data->dev_private;
        struct pmd_process_private *pp = dev->process_private;
        struct pcap_tx_queue *tx;
        struct pcap_rx_queue *rx;

        /* Special iface case. Single pcap is open and shared between tx/rx. */
        if (internals->single_iface) {
                tx = &internals->tx_queue[0];
                rx = &internals->rx_queue[0];

                if (!pp->tx_pcap[0] &&
                        strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
                        if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
                                return -1;
                        pp->rx_pcap[0] = pp->tx_pcap[0];
                }

                goto status_up;
        }

        /* If not open already, open tx pcaps/dumpers */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                tx = &internals->tx_queue[i];

                if (!pp->tx_dumper[i] &&
                                strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
                        if (open_single_tx_pcap(tx->name,
                                &pp->tx_dumper[i]) < 0)
                                return -1;
                } else if (!pp->tx_pcap[i] &&
                                strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
                        if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
                                return -1;
                }
        }

        /* If not open already, open rx pcaps */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rx = &internals->rx_queue[i];

                if (pp->rx_pcap[i] != NULL)
                        continue;

                if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
                        if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
                                return -1;
                } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
                        if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
                                return -1;
                }
        }

status_up:
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

        dev->data->dev_link.link_status = ETH_LINK_UP;

        return 0;
}

/*
 * This function gets called when the current port gets stopped. It is
 * the only place we can close all the Tx stream dumpers. If it is not
 * called, the dumpers are only flushed within each Tx burst.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internals = dev->data->dev_private;
        struct pmd_process_private *pp = dev->process_private;

        /* Special iface case. Single pcap is open and shared between tx/rx. */
        if (internals->single_iface) {
                pcap_close(pp->tx_pcap[0]);
                pp->tx_pcap[0] = NULL;
                pp->rx_pcap[0] = NULL;
                goto status_down;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (pp->tx_dumper[i] != NULL) {
                        pcap_dump_close(pp->tx_dumper[i]);
                        pp->tx_dumper[i] = NULL;
                }

                if (pp->tx_pcap[i] != NULL) {
                        pcap_close(pp->tx_pcap[i]);
                        pp->tx_pcap[i] = NULL;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (pp->rx_pcap[i] != NULL) {
                        pcap_close(pp->rx_pcap[i]);
                        pp->rx_pcap[i] = NULL;
                }
        }

status_down:
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;

        dev_info->if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t) -1;
        dev_info->max_rx_queues = dev->data->nb_rx_queues;
        dev_info->max_tx_queues = dev->data->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i;
        unsigned long rx_packets_total = 0, rx_bytes_total = 0;
        unsigned long tx_packets_total = 0, tx_bytes_total = 0;
        unsigned long tx_packets_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
                stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
                rx_packets_total += stats->q_ipackets[i];
                rx_bytes_total += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
                stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
                tx_packets_total += stats->q_opackets[i];
                tx_bytes_total += stats->q_obytes[i];
                tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
        }

        stats->ipackets = rx_packets_total;
        stats->ibytes = rx_bytes_total;
        stats->opackets = tx_packets_total;
        stats->obytes = tx_bytes_total;
        stats->oerrors = tx_packets_err_total;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal = dev->data->dev_private;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                internal->rx_queue[i].rx_stat.pkts = 0;
                internal->rx_queue[i].rx_stat.bytes = 0;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                internal->tx_queue[i].tx_stat.pkts = 0;
                internal->tx_queue[i].tx_stat.bytes = 0;
                internal->tx_queue[i].tx_stat.err_pkts = 0;
        }
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct pmd_internals *internals = dev->data->dev_private;
        struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

        pcap_q->mb_pool = mb_pool;
        pcap_q->port_id = dev->data->port_id;
        pcap_q->queue_id = rx_queue_id;
        dev->data->rx_queues[rx_queue_id] = pcap_q;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];

        pcap_q->port_id = dev->data->port_id;
        pcap_q->queue_id = tx_queue_id;
        dev->data->tx_queues[tx_queue_id] = pcap_q;

        return 0;
}

static int
eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}

static int
eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}

static int
eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}

static int
eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_start = eth_rx_queue_start,
        .tx_queue_start = eth_tx_queue_start,
        .rx_queue_stop = eth_rx_queue_stop,
        .tx_queue_stop = eth_tx_queue_stop,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
};

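/*
 * Append an opened pcap/dumper handle to the devargs queue list,
 * failing once RTE_PMD_PCAP_MAX_QUEUES entries are in use.
 */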
static int
add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
                pcap_t *pcap, pcap_dumper_t *dumper)
{
        if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
                return -1;
        if (pcap)
                pmd->queue[pmd->num_of_queue].pcap = pcap;
        if (dumper)
                pmd->queue[pmd->num_of_queue].dumper = dumper;
        pmd->queue[pmd->num_of_queue].name = name;
        pmd->queue[pmd->num_of_queue].type = type;
        pmd->num_of_queue++;
        return 0;
}

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
        const char *pcap_filename = value;
        struct pmd_devargs *rx = extra_args;
        pcap_t *pcap = NULL;

        if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
                return -1;

        if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
                pcap_close(pcap);
                return -1;
        }

        return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
        const char *pcap_filename = value;
        struct pmd_devargs *dumpers = extra_args;
        pcap_dumper_t *dumper;

        if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
                return -1;

        if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
                pcap_dump_close(dumper);
                return -1;
        }

        return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
        const char *iface = value;
        struct pmd_devargs *tx = extra_args;
        pcap_t *pcap = NULL;

        if (open_single_iface(iface, &pcap) < 0)
                return -1;

        tx->queue[0].pcap = pcap;
        tx->queue[0].name = iface;
        tx->queue[0].type = key;

        return 0;
}

static inline int
set_iface_direction(const char *iface, pcap_t *pcap,
                pcap_direction_t direction)
{
        const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
        if (pcap_setdirection(pcap, direction) < 0) {
                PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s",
                                iface, direction_str, pcap_geterr(pcap));
                return -1;
        }
        PMD_LOG(INFO, "Setting %s pcap direction %s",
                        iface, direction_str);
        return 0;
}

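/*
 * Open an interface and append it as a queue; shared by the rx_iface
 * and tx_iface kvargs handlers below.
 */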
static inline int
open_iface(const char *key, const char *value, void *extra_args)
{
        const char *iface = value;
        struct pmd_devargs *pmd = extra_args;
        pcap_t *pcap = NULL;

        if (open_single_iface(iface, &pcap) < 0)
                return -1;
        if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
                pcap_close(pcap);
                return -1;
        }

        return 0;
}

/*
 * Opens a NIC for reading packets from it
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
        int ret = open_iface(key, value, extra_args);
        if (ret < 0)
                return ret;
        if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
                struct pmd_devargs *pmd = extra_args;
                unsigned int qid = pmd->num_of_queue - 1;

                set_iface_direction(pmd->queue[qid].name,
                                pmd->queue[qid].pcap,
                                PCAP_D_IN);
        }

        return 0;
}

static inline int
rx_iface_args_process(const char *key, const char *value, void *extra_args)
{
        if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
                        strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
                return open_rx_iface(key, value, extra_args);

        return 0;
}

/*
 * Opens a NIC for writing packets to it
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
        return open_iface(key, value, extra_args);
}

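/*
 * kvargs handler for the phy_mac option: any non-zero value enables
 * use of the physical interface MAC address.
 */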
static int
select_phy_mac(const char *key __rte_unused, const char *value,
                void *extra_args)
{
        if (extra_args) {
                const int phy_mac = atoi(value);
                int *enable_phy_mac = extra_args;

                if (phy_mac)
                        *enable_phy_mac = 1;
        }
        return 0;
}

static struct rte_vdev_driver pmd_pcap_drv;

static int
pmd_init_internals(struct rte_vdev_device *vdev,
                const unsigned int nb_rx_queues,
                const unsigned int nb_tx_queues,
                struct pmd_internals **internals,
                struct rte_eth_dev **eth_dev)
{
        struct rte_eth_dev_data *data;
        struct pmd_process_private *pp;
        unsigned int numa_node = vdev->device.numa_node;

        PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
                numa_node);

        pp = (struct pmd_process_private *)
                rte_zmalloc(NULL, sizeof(struct pmd_process_private),
                                RTE_CACHE_LINE_SIZE);

        if (pp == NULL) {
                PMD_LOG(ERR,
                        "Failed to allocate memory for process private");
                return -1;
        }

        /* reserve an ethdev entry */
        *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
        if (!(*eth_dev)) {
                rte_free(pp);
                return -1;
        }
        (*eth_dev)->process_private = pp;
        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in eth_dev
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        *internals = (*eth_dev)->data->dev_private;
        /*
         * Interface MAC = 02:70:63:61:70:<iface_idx>
         * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
         * where the middle 4 characters are converted to hex.
         */
        (*internals)->eth_addr = (struct rte_ether_addr) {
                .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
        };
        (*internals)->phy_mac = 0;
        data = (*eth_dev)->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &(*internals)->eth_addr;

        /*
         * NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the rings are local per process
         */
        (*eth_dev)->dev_ops = &ops;

        strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
                        ETH_PCAP_ARG_MAXLEN);

        return 0;
}

static int
eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
                const unsigned int numa_node)
{
#if defined(RTE_EXEC_ENV_LINUX)
        void *mac_addrs;
        struct ifreq ifr;
        int if_fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (if_fd == -1)
                return -1;

        rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
        if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {
                close(if_fd);
                return -1;
        }

        mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
        if (!mac_addrs) {
                close(if_fd);
                return -1;
        }

        PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
        eth_dev->data->mac_addrs = mac_addrs;
        rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
                        ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

        close(if_fd);

        return 0;

#elif defined(RTE_EXEC_ENV_FREEBSD)
        void *mac_addrs;
        struct if_msghdr *ifm;
        struct sockaddr_dl *sdl;
        int mib[6];
        size_t len = 0;
        char *buf;

        mib[0] = CTL_NET;
        mib[1] = AF_ROUTE;
        mib[2] = 0;
        mib[3] = AF_LINK;
        mib[4] = NET_RT_IFLIST;
        mib[5] = if_nametoindex(if_name);

        if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
                return -1;

        if (len == 0)
                return -1;

        buf = rte_malloc(NULL, len, 0);
        if (!buf)
                return -1;

        if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
                rte_free(buf);
                return -1;
        }
        ifm = (struct if_msghdr *)buf;
        sdl = (struct sockaddr_dl *)(ifm + 1);

        mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
        if (!mac_addrs) {
                rte_free(buf);
                return -1;
        }

        PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
        eth_dev->data->mac_addrs = mac_addrs;
        rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
                        LLADDR(sdl), RTE_ETHER_ADDR_LEN);

        rte_free(buf);

        return 0;
#else
        return -1;
#endif
}

static int
eth_from_pcaps_common(struct rte_vdev_device *vdev,
                struct pmd_devargs_all *devargs_all,
                struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
        struct pmd_process_private *pp;
        struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
        struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
        const unsigned int nb_rx_queues = rx_queues->num_of_queue;
        const unsigned int nb_tx_queues = tx_queues->num_of_queue;
        unsigned int i;

        /* do some parameter checking */
        if (rx_queues == NULL && nb_rx_queues > 0)
                return -1;
        if (tx_queues == NULL && nb_tx_queues > 0)
                return -1;

        if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
                        eth_dev) < 0)
                return -1;

        pp = (*eth_dev)->process_private;
        for (i = 0; i < nb_rx_queues; i++) {
                struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
                struct devargs_queue *queue = &rx_queues->queue[i];

                pp->rx_pcap[i] = queue->pcap;
                strlcpy(rx->name, queue->name, sizeof(rx->name));
                strlcpy(rx->type, queue->type, sizeof(rx->type));
        }

        for (i = 0; i < nb_tx_queues; i++) {
                struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
                struct devargs_queue *queue = &tx_queues->queue[i];

                pp->tx_dumper[i] = queue->dumper;
                pp->tx_pcap[i] = queue->pcap;
                strlcpy(tx->name, queue->name, sizeof(tx->name));
                strlcpy(tx->type, queue->type, sizeof(tx->type));
        }

        return 0;
}

static int
eth_from_pcaps(struct rte_vdev_device *vdev,
                struct pmd_devargs_all *devargs_all)
{
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
        int single_iface = devargs_all->single_iface;
        int ret;

        ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);

        if (ret < 0)
                return ret;

        /* store whether we are using a single interface for rx/tx or not */
        internals->single_iface = single_iface;

        if (single_iface) {
                internals->if_index = if_nametoindex(rx_queues->queue[0].name);

                /* phy_mac arg is applied only if "iface" devarg is provided */
                if (rx_queues->phy_mac) {
                        int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
                                        eth_dev, vdev->device.numa_node);
                        if (ret == 0)
                                internals->phy_mac = 1;
                }
        }

        eth_dev->rx_pkt_burst = eth_pcap_rx;

        /* Assign tx ops. */
        if (devargs_all->is_tx_pcap)
                eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
        else
                eth_dev->tx_pkt_burst = eth_pcap_tx;

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}

static int
pmd_pcap_probe(struct rte_vdev_device *dev)
{
        const char *name;
        unsigned int is_rx_pcap = 0;
        struct rte_kvargs *kvlist;
        struct pmd_devargs pcaps = {0};
        struct pmd_devargs dumpers = {0};
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_internals *internal;
        int ret;

        struct pmd_devargs_all devargs_all = {
                .single_iface = 0,
                .is_tx_pcap = 0,
                .is_tx_iface = 0,
        };

        name = rte_vdev_device_name(dev);
        PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);

        gettimeofday(&start_time, NULL);
        start_cycles = rte_get_timer_cycles();
        hz = rte_get_timer_hz();

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }

                internal = eth_dev->data->dev_private;

                kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
                if (kvlist == NULL)
                        return -1;
        } else {
                kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
                                valid_arguments);
                if (kvlist == NULL)
                        return -1;
        }

        /*
         * If iface argument is passed we open the NICs and use them for
         * reading / writing
         */
        if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {

                ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
                                &open_rx_tx_iface, &pcaps);
                if (ret < 0)
                        goto free_kvlist;

                dumpers.queue[0] = pcaps.queue[0];

                ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
                                &select_phy_mac, &pcaps.phy_mac);
                if (ret < 0)
                        goto free_kvlist;

                dumpers.phy_mac = pcaps.phy_mac;

                devargs_all.single_iface = 1;
                pcaps.num_of_queue = 1;
                dumpers.num_of_queue = 1;

                goto create_eth;
        }

        /*
         * We check whether we want to open a RX stream from a real NIC or a
         * pcap file
         */
        is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
        pcaps.num_of_queue = 0;

        if (is_rx_pcap) {
                ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
                                &open_rx_pcap, &pcaps);
        } else {
                ret = rte_kvargs_process(kvlist, NULL,
                                &rx_iface_args_process, &pcaps);
        }

        if (ret < 0)
                goto free_kvlist;

        /*
         * We check whether we want to open a TX stream to a real NIC or a
         * pcap file
         */
        devargs_all.is_tx_pcap =
                rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
        dumpers.num_of_queue = 0;

        if (devargs_all.is_tx_pcap)
                ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
                                &open_tx_pcap, &dumpers);
        else
                ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
                                &open_tx_iface, &dumpers);

        if (ret < 0)
                goto free_kvlist;

create_eth:
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                struct pmd_process_private *pp;
                unsigned int i;

                internal = eth_dev->data->dev_private;
                pp = (struct pmd_process_private *)
                        rte_zmalloc(NULL,
                                sizeof(struct pmd_process_private),
                                RTE_CACHE_LINE_SIZE);

                if (pp == NULL) {
                        PMD_LOG(ERR,
                                "Failed to allocate memory for process private");
                        ret = -1;
                        goto free_kvlist;
                }

                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;

                /* setup process private */
                for (i = 0; i < pcaps.num_of_queue; i++)
                        pp->rx_pcap[i] = pcaps.queue[i].pcap;

                for (i = 0; i < dumpers.num_of_queue; i++) {
                        pp->tx_dumper[i] = dumpers.queue[i].dumper;
                        pp->tx_pcap[i] = dumpers.queue[i].pcap;
                }

                eth_dev->process_private = pp;
                eth_dev->rx_pkt_burst = eth_pcap_rx;
                if (devargs_all.is_tx_pcap)
                        eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
                else
                        eth_dev->tx_pkt_burst = eth_pcap_tx;

                rte_eth_dev_probing_finish(eth_dev);
                goto free_kvlist;
        }

        devargs_all.rx_queues = pcaps;
        devargs_all.tx_queues = dumpers;

        ret = eth_from_pcaps(dev, &devargs_all);

free_kvlist:
        rte_kvargs_free(kvlist);

        return ret;
}

static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
                        rte_socket_id());

        if (!dev)
                return -1;

        /* find the ethdev entry for this device name */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                internals = eth_dev->data->dev_private;
                if (internals != NULL && internals->phy_mac == 0)
                        /* not dynamically allocated, must not be freed */
                        eth_dev->data->mac_addrs = NULL;
        }

        rte_free(eth_dev->process_private);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_pcap_drv = {
        .probe = pmd_pcap_probe,
        .remove = pmd_pcap_remove,
};

RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
        ETH_PCAP_RX_PCAP_ARG "=<string> "
        ETH_PCAP_TX_PCAP_ARG "=<string> "
        ETH_PCAP_RX_IFACE_ARG "=<ifc> "
        ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
        ETH_PCAP_TX_IFACE_ARG "=<ifc> "
        ETH_PCAP_IFACE_ARG "=<ifc> "
        ETH_PCAP_PHY_MAC_ARG "=<int>");
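
/*
 * Example vdev strings (device names and file paths here are purely
 * illustrative):
 *   --vdev=net_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap
 *   --vdev=net_pcap1,iface=eth0,phy_mac=1
 */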

RTE_INIT(eth_pcap_init_log)
{
        eth_pcap_logtype = rte_log_register("pmd.net.pcap");
        if (eth_pcap_logtype >= 0)
                rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
}