[dpdk.git] / drivers / net / pcap / rte_eth_pcap.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright(c) 2014 6WIND S.A.
4  * All rights reserved.
5  */
6
7 #include <time.h>
8
9 #include <net/if.h>
10 #include <sys/socket.h>
11 #include <sys/ioctl.h>
12 #include <unistd.h>
13
14 #if defined(RTE_EXEC_ENV_FREEBSD)
15 #include <sys/sysctl.h>
16 #include <net/if_dl.h>
17 #endif
18
19 #include <pcap.h>
20
21 #include <rte_cycles.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
26 #include <rte_mbuf.h>
27 #include <rte_bus_vdev.h>
28 #include <rte_string_fns.h>
29
30 #define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
31 #define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
32 #define RTE_ETH_PCAP_PROMISC 1
33 #define RTE_ETH_PCAP_TIMEOUT -1
34
35 #define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
36 #define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
37 #define ETH_PCAP_RX_IFACE_ARG "rx_iface"
38 #define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
39 #define ETH_PCAP_TX_IFACE_ARG "tx_iface"
40 #define ETH_PCAP_IFACE_ARG    "iface"
41 #define ETH_PCAP_PHY_MAC_ARG  "phy_mac"
42 #define ETH_PCAP_INFINITE_RX_ARG  "infinite_rx"
43
44 #define ETH_PCAP_ARG_MAXLEN     64
45
46 #define RTE_PMD_PCAP_MAX_QUEUES 16
47
48 static char errbuf[PCAP_ERRBUF_SIZE];
49 static struct timeval start_time;
50 static uint64_t start_cycles;
51 static uint64_t hz;
52 static uint8_t iface_idx;
53
54 struct queue_stat {
55         volatile unsigned long pkts;
56         volatile unsigned long bytes;
57         volatile unsigned long err_pkts;
58 };
59
60 struct pcap_rx_queue {
61         uint16_t port_id;
62         uint16_t queue_id;
63         struct rte_mempool *mb_pool;
64         struct queue_stat rx_stat;
65         char name[PATH_MAX];
66         char type[ETH_PCAP_ARG_MAXLEN];
67
68         /* Contains pre-generated packets to be looped through */
69         struct rte_ring *pkts;
70 };
71
72 struct pcap_tx_queue {
73         uint16_t port_id;
74         uint16_t queue_id;
75         struct queue_stat tx_stat;
76         char name[PATH_MAX];
77         char type[ETH_PCAP_ARG_MAXLEN];
78 };
79
80 struct pmd_internals {
81         struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
82         struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
83         char devargs[ETH_PCAP_ARG_MAXLEN];
84         struct rte_ether_addr eth_addr;
85         int if_index;
86         int single_iface;
87         int phy_mac;
88         unsigned int infinite_rx;
89 };
90
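/*
 * Per-process data: pcap_t and dumper handles wrap file descriptors and
 * cannot be shared between primary and secondary processes, so each
 * process keeps its own copies here (dev->process_private).
 */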
91 struct pmd_process_private {
92         pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
93         pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
94         pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
95 };
96
97 struct pmd_devargs {
98         unsigned int num_of_queue;
99         struct devargs_queue {
100                 pcap_dumper_t *dumper;
101                 pcap_t *pcap;
102                 const char *name;
103                 const char *type;
104         } queue[RTE_PMD_PCAP_MAX_QUEUES];
105         int phy_mac;
106 };
107
108 struct pmd_devargs_all {
109         struct pmd_devargs rx_queues;
110         struct pmd_devargs tx_queues;
111         int single_iface;
112         unsigned int is_tx_pcap;
113         unsigned int is_tx_iface;
114         unsigned int is_rx_pcap;
115         unsigned int is_rx_iface;
116         unsigned int infinite_rx;
117 };
118
119 static const char *valid_arguments[] = {
120         ETH_PCAP_RX_PCAP_ARG,
121         ETH_PCAP_TX_PCAP_ARG,
122         ETH_PCAP_RX_IFACE_ARG,
123         ETH_PCAP_RX_IFACE_IN_ARG,
124         ETH_PCAP_TX_IFACE_ARG,
125         ETH_PCAP_IFACE_ARG,
126         ETH_PCAP_PHY_MAC_ARG,
127         ETH_PCAP_INFINITE_RX_ARG,
128         NULL
129 };
130
131 static struct rte_eth_link pmd_link = {
132                 .link_speed = ETH_SPEED_NUM_10G,
133                 .link_duplex = ETH_LINK_FULL_DUPLEX,
134                 .link_status = ETH_LINK_DOWN,
135                 .link_autoneg = ETH_LINK_FIXED,
136 };
137
138 RTE_LOG_REGISTER(eth_pcap_logtype, pmd.net.pcap, NOTICE);
139
140 #define PMD_LOG(level, fmt, args...) \
141         rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
142                 "%s(): " fmt "\n", __func__, ##args)
143
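/*
 * Copy a captured packet that does not fit into a single mbuf into a chain
 * of mbufs. Returns the resulting number of segments, or -1 if a segment
 * could not be allocated.
 */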
144 static int
145 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
146                 const u_char *data, uint16_t data_len)
147 {
148         /* Copy the first segment. */
149         uint16_t len = rte_pktmbuf_tailroom(mbuf);
150         struct rte_mbuf *m = mbuf;
151
152         rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
153         data_len -= len;
154         data += len;
155
156         while (data_len > 0) {
157                 /* Allocate next mbuf and point to that. */
158                 m->next = rte_pktmbuf_alloc(mb_pool);
159
160                 if (unlikely(!m->next))
161                         return -1;
162
163                 m = m->next;
164
165                 /* Headroom is not needed in chained mbufs. */
166                 rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
167                 m->pkt_len = 0;
168                 m->data_len = 0;
169
170                 /* Copy next segment. */
171                 len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
172                 rte_memcpy(rte_pktmbuf_append(m, len), data, len);
173
174                 mbuf->nb_segs++;
175                 data_len -= len;
176                 data += len;
177         }
178
179         return mbuf->nb_segs;
180 }
181
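/*
 * RX burst used in infinite_rx mode: the capture was pre-loaded into a ring
 * at queue setup, so each burst copies ring entries into freshly allocated
 * mbufs and re-enqueues the originals, looping over the file forever.
 */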
182 static uint16_t
183 eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
184 {
185         int i;
186         struct pcap_rx_queue *pcap_q = queue;
187         uint32_t rx_bytes = 0;
188
189         if (unlikely(nb_pkts == 0))
190                 return 0;
191
192         if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
193                 return 0;
194
195         for (i = 0; i < nb_pkts; i++) {
196                 struct rte_mbuf *pcap_buf;
197                 int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
198                 if (err)
199                         return i;
200
201                 rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
202                                 rte_pktmbuf_mtod(pcap_buf, void *),
203                                 pcap_buf->data_len);
204                 bufs[i]->data_len = pcap_buf->data_len;
205                 bufs[i]->pkt_len = pcap_buf->pkt_len;
206                 bufs[i]->port = pcap_q->port_id;
207                 rx_bytes += pcap_buf->data_len;
208
209                 /* Enqueue packet back on ring to allow infinite rx. */
210                 rte_ring_enqueue(pcap_q->pkts, pcap_buf);
211         }
212
213         pcap_q->rx_stat.pkts += i;
214         pcap_q->rx_stat.bytes += rx_bytes;
215
216         return i;
217 }
218
219 static uint16_t
220 eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
221 {
222         unsigned int i;
223         struct pcap_pkthdr header;
224         struct pmd_process_private *pp;
225         const u_char *packet;
226         struct rte_mbuf *mbuf;
227         struct pcap_rx_queue *pcap_q = queue;
228         uint16_t num_rx = 0;
229         uint32_t rx_bytes = 0;
230         pcap_t *pcap;
231
232         pp = rte_eth_devices[pcap_q->port_id].process_private;
233         pcap = pp->rx_pcap[pcap_q->queue_id];
234
235         if (unlikely(pcap == NULL || nb_pkts == 0))
236                 return 0;
237
238         /* Reads the given number of packets from the pcap file one by one
239          * and copies the packet data into a newly allocated mbuf to return.
240          */
241         for (i = 0; i < nb_pkts; i++) {
242                 /* Get the next PCAP packet */
243                 packet = pcap_next(pcap, &header);
244                 if (unlikely(packet == NULL))
245                         break;
246
247                 mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
248                 if (unlikely(mbuf == NULL))
249                         break;
250
251                 if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
252                         /* pcap packet will fit in the mbuf, can copy it */
253                         rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
254                                         header.caplen);
255                         mbuf->data_len = (uint16_t)header.caplen;
256                 } else {
257                         /* Try read jumbo frame into multi mbufs. */
258                         if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
259                                                        mbuf,
260                                                        packet,
261                                                        header.caplen) == -1)) {
262                                 rte_pktmbuf_free(mbuf);
263                                 break;
264                         }
265                 }
266
267                 mbuf->pkt_len = (uint16_t)header.caplen;
268                 mbuf->timestamp = (uint64_t)header.ts.tv_sec * 1000000
269                                                         + header.ts.tv_usec;
270                 mbuf->ol_flags |= PKT_RX_TIMESTAMP;
271                 mbuf->port = pcap_q->port_id;
272                 bufs[num_rx] = mbuf;
273                 num_rx++;
274                 rx_bytes += header.caplen;
275         }
276         pcap_q->rx_stat.pkts += num_rx;
277         pcap_q->rx_stat.bytes += rx_bytes;
278
279         return num_rx;
280 }
281
282 static uint16_t
283 eth_null_rx(void *queue __rte_unused,
284                 struct rte_mbuf **bufs __rte_unused,
285                 uint16_t nb_pkts __rte_unused)
286 {
287         return 0;
288 }
289
290 #define NSEC_PER_SEC    1000000000L
291
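/*
 * Derive a capture timestamp from the TSC: cycles elapsed since probe time
 * are converted to seconds plus a nanosecond remainder and added to the
 * wall-clock time sampled at probe (the dumper is opened with nanosecond
 * timestamp precision).
 */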
292 static inline void
293 calculate_timestamp(struct timeval *ts) {
294         uint64_t cycles;
295         struct timeval cur_time;
296
297         cycles = rte_get_timer_cycles() - start_cycles;
298         cur_time.tv_sec = cycles / hz;
299         cur_time.tv_usec = (cycles % hz) * NSEC_PER_SEC / hz;
300
301         ts->tv_sec = start_time.tv_sec + cur_time.tv_sec;
302         ts->tv_usec = start_time.tv_usec + cur_time.tv_usec;
303         if (ts->tv_usec >= NSEC_PER_SEC) {
304                 ts->tv_usec -= NSEC_PER_SEC;
305                 ts->tv_sec += 1;
306         }
307 }
308
309 /*
310  * Callback to handle writing packets to a pcap file.
311  */
312 static uint16_t
313 eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
314 {
315         unsigned int i;
316         struct rte_mbuf *mbuf;
317         struct pmd_process_private *pp;
318         struct pcap_tx_queue *dumper_q = queue;
319         uint16_t num_tx = 0;
320         uint32_t tx_bytes = 0;
321         struct pcap_pkthdr header;
322         pcap_dumper_t *dumper;
323         unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
324         size_t len, caplen;
325
326         pp = rte_eth_devices[dumper_q->port_id].process_private;
327         dumper = pp->tx_dumper[dumper_q->queue_id];
328
329         if (dumper == NULL || nb_pkts == 0)
330                 return 0;
331
332         /* writes the nb_pkts packets to the previously opened pcap file
333          * dumper */
334         for (i = 0; i < nb_pkts; i++) {
335                 mbuf = bufs[i];
336                 len = caplen = rte_pktmbuf_pkt_len(mbuf);
337                 if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
338                                 len > sizeof(temp_data))) {
339                         caplen = sizeof(temp_data);
340                 }
341
342                 calculate_timestamp(&header.ts);
343                 header.len = len;
344                 header.caplen = caplen;
345                 /* rte_pktmbuf_read() returns a pointer to the data directly
346                  * in the mbuf (when the mbuf is contiguous) or, otherwise,
347                  * a pointer to temp_data after copying into it.
348                  */
349                 pcap_dump((u_char *)dumper, &header,
350                         rte_pktmbuf_read(mbuf, 0, caplen, temp_data));
351
352                 num_tx++;
353                 tx_bytes += caplen;
354                 rte_pktmbuf_free(mbuf);
355         }
356
357         /*
358          * Since there's no place to hook a callback when the forwarding
359          * process stops and to make sure the pcap file is actually written,
360          * we flush the pcap dumper within each burst.
361          */
362         pcap_dump_flush(dumper);
363         dumper_q->tx_stat.pkts += num_tx;
364         dumper_q->tx_stat.bytes += tx_bytes;
365         dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
366
367         return nb_pkts;
368 }
369
370 /*
371  * Callback to handle dropping packets in the infinite rx case.
372  */
373 static uint16_t
374 eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
375 {
376         unsigned int i;
377         uint32_t tx_bytes = 0;
378         struct pcap_tx_queue *tx_queue = queue;
379
380         if (unlikely(nb_pkts == 0))
381                 return 0;
382
383         for (i = 0; i < nb_pkts; i++) {
384                 tx_bytes += bufs[i]->data_len;
385                 rte_pktmbuf_free(bufs[i]);
386         }
387
388         tx_queue->tx_stat.pkts += nb_pkts;
389         tx_queue->tx_stat.bytes += tx_bytes;
390
391         return i;
392 }
393
394 /*
395  * Callback to handle sending packets through a real NIC.
396  */
397 static uint16_t
398 eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
399 {
400         unsigned int i;
401         int ret;
402         struct rte_mbuf *mbuf;
403         struct pmd_process_private *pp;
404         struct pcap_tx_queue *tx_queue = queue;
405         uint16_t num_tx = 0;
406         uint32_t tx_bytes = 0;
407         pcap_t *pcap;
408         unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
409         size_t len;
410
411         pp = rte_eth_devices[tx_queue->port_id].process_private;
412         pcap = pp->tx_pcap[tx_queue->queue_id];
413
414         if (unlikely(nb_pkts == 0 || pcap == NULL))
415                 return 0;
416
417         for (i = 0; i < nb_pkts; i++) {
418                 mbuf = bufs[i];
419                 len = rte_pktmbuf_pkt_len(mbuf);
420                 if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
421                                 len > sizeof(temp_data))) {
422                         PMD_LOG(ERR,
423                                 "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
424                                 len, sizeof(temp_data));
425                         rte_pktmbuf_free(mbuf);
426                         continue;
427                 }
428
429                 /* rte_pktmbuf_read() returns a pointer to the data directly
430                  * in the mbuf (when the mbuf is contiguous) or, otherwise,
431                  * a pointer to temp_data after copying into it.
432                  */
433                 ret = pcap_sendpacket(pcap,
434                         rte_pktmbuf_read(mbuf, 0, len, temp_data), len);
435                 if (unlikely(ret != 0))
436                         break;
437                 num_tx++;
438                 tx_bytes += len;
439                 rte_pktmbuf_free(mbuf);
440         }
441
442         tx_queue->tx_stat.pkts += num_tx;
443         tx_queue->tx_stat.bytes += tx_bytes;
444         tx_queue->tx_stat.err_pkts += i - num_tx;
445
446         return i;
447 }
448
449 /*
450  * pcap_open_live wrapper function
451  */
452 static inline int
453 open_iface_live(const char *iface, pcap_t **pcap) {
454         *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
455                         RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
456
457         if (*pcap == NULL) {
458                 PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
459                 return -1;
460         }
461
462         return 0;
463 }
464
465 static int
466 open_single_iface(const char *iface, pcap_t **pcap)
467 {
468         if (open_iface_live(iface, pcap) < 0) {
469                 PMD_LOG(ERR, "Couldn't open interface %s", iface);
470                 return -1;
471         }
472
473         return 0;
474 }
475
476 static int
477 open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
478 {
479         pcap_t *tx_pcap;
480
481         /*
482          * We need to create a dummy empty pcap_t to use
483          * with pcap_dump_open(). We create an Ethernet pcap
484          * holder with a large enough snapshot length.
485          */
486         tx_pcap = pcap_open_dead_with_tstamp_precision(DLT_EN10MB,
487                         RTE_ETH_PCAP_SNAPSHOT_LEN, PCAP_TSTAMP_PRECISION_NANO);
488         if (tx_pcap == NULL) {
489                 PMD_LOG(ERR, "Couldn't create dead pcap");
490                 return -1;
491         }
492
493         /* The dumper is created using the previous pcap_t reference */
494         *dumper = pcap_dump_open(tx_pcap, pcap_filename);
495         if (*dumper == NULL) {
496                 pcap_close(tx_pcap);
497                 PMD_LOG(ERR, "Couldn't open %s for writing.",
498                         pcap_filename);
499                 return -1;
500         }
501
502         pcap_close(tx_pcap);
503         return 0;
504 }
505
506 static int
507 open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
508 {
509         *pcap = pcap_open_offline(pcap_filename, errbuf);
510         if (*pcap == NULL) {
511                 PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
512                         errbuf);
513                 return -1;
514         }
515
516         return 0;
517 }
518
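/*
 * Walk the whole capture once to count its packets (used to size the
 * infinite_rx ring), then reopen the file so it can be read again from
 * the start.
 */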
519 static uint64_t
520 count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
521 {
522         const u_char *packet;
523         struct pcap_pkthdr header;
524         uint64_t pcap_pkt_count = 0;
525
526         while ((packet = pcap_next(*pcap, &header)))
527                 pcap_pkt_count++;
528
529         /* The pcap is reopened so it can be used as normal later. */
530         pcap_close(*pcap);
531         *pcap = NULL;
532         open_single_rx_pcap(pcap_q->name, pcap);
533
534         return pcap_pkt_count;
535 }
536
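/*
 * Open any capture files, dumpers or interfaces that are not open yet
 * (per the queue's devarg type) and mark all queues and the link as up.
 */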
537 static int
538 eth_dev_start(struct rte_eth_dev *dev)
539 {
540         unsigned int i;
541         struct pmd_internals *internals = dev->data->dev_private;
542         struct pmd_process_private *pp = dev->process_private;
543         struct pcap_tx_queue *tx;
544         struct pcap_rx_queue *rx;
545
546         /* Special iface case. Single pcap is open and shared between tx/rx. */
547         if (internals->single_iface) {
548                 tx = &internals->tx_queue[0];
549                 rx = &internals->rx_queue[0];
550
551                 if (!pp->tx_pcap[0] &&
552                         strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
553                         if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
554                                 return -1;
555                         pp->rx_pcap[0] = pp->tx_pcap[0];
556                 }
557
558                 goto status_up;
559         }
560
561         /* If not open already, open tx pcaps/dumpers */
562         for (i = 0; i < dev->data->nb_tx_queues; i++) {
563                 tx = &internals->tx_queue[i];
564
565                 if (!pp->tx_dumper[i] &&
566                                 strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
567                         if (open_single_tx_pcap(tx->name,
568                                 &pp->tx_dumper[i]) < 0)
569                                 return -1;
570                 } else if (!pp->tx_pcap[i] &&
571                                 strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
572                         if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
573                                 return -1;
574                 }
575         }
576
577         /* If not open already, open rx pcaps */
578         for (i = 0; i < dev->data->nb_rx_queues; i++) {
579                 rx = &internals->rx_queue[i];
580
581                 if (pp->rx_pcap[i] != NULL)
582                         continue;
583
584                 if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
585                         if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
586                                 return -1;
587                 } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
588                         if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
589                                 return -1;
590                 }
591         }
592
593 status_up:
594         for (i = 0; i < dev->data->nb_rx_queues; i++)
595                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
596
597         for (i = 0; i < dev->data->nb_tx_queues; i++)
598                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
599
600         dev->data->dev_link.link_status = ETH_LINK_UP;
601
602         return 0;
603 }
604
605 /*
606  * This function gets called when the current port gets stopped.
607  * It is the only place for us to close all the tx stream dumpers.
608  * If it is not called, the dumpers are still flushed within each tx burst.
609  */
610 static void
611 eth_dev_stop(struct rte_eth_dev *dev)
612 {
613         unsigned int i;
614         struct pmd_internals *internals = dev->data->dev_private;
615         struct pmd_process_private *pp = dev->process_private;
616
617         /* Special iface case. Single pcap is open and shared between tx/rx. */
618         if (internals->single_iface) {
619                 pcap_close(pp->tx_pcap[0]);
620                 pp->tx_pcap[0] = NULL;
621                 pp->rx_pcap[0] = NULL;
622                 goto status_down;
623         }
624
625         for (i = 0; i < dev->data->nb_tx_queues; i++) {
626                 if (pp->tx_dumper[i] != NULL) {
627                         pcap_dump_close(pp->tx_dumper[i]);
628                         pp->tx_dumper[i] = NULL;
629                 }
630
631                 if (pp->tx_pcap[i] != NULL) {
632                         pcap_close(pp->tx_pcap[i]);
633                         pp->tx_pcap[i] = NULL;
634                 }
635         }
636
637         for (i = 0; i < dev->data->nb_rx_queues; i++) {
638                 if (pp->rx_pcap[i] != NULL) {
639                         pcap_close(pp->rx_pcap[i]);
640                         pp->rx_pcap[i] = NULL;
641                 }
642         }
643
644 status_down:
645         for (i = 0; i < dev->data->nb_rx_queues; i++)
646                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
647
648         for (i = 0; i < dev->data->nb_tx_queues; i++)
649                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
650
651         dev->data->dev_link.link_status = ETH_LINK_DOWN;
652 }
653
654 static int
655 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
656 {
657         return 0;
658 }
659
660 static int
661 eth_dev_info(struct rte_eth_dev *dev,
662                 struct rte_eth_dev_info *dev_info)
663 {
664         struct pmd_internals *internals = dev->data->dev_private;
665
666         dev_info->if_index = internals->if_index;
667         dev_info->max_mac_addrs = 1;
668         dev_info->max_rx_pktlen = (uint32_t) -1;
669         dev_info->max_rx_queues = dev->data->nb_rx_queues;
670         dev_info->max_tx_queues = dev->data->nb_tx_queues;
671         dev_info->min_rx_bufsize = 0;
672
673         return 0;
674 }
675
676 static int
677 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
678 {
679         unsigned int i;
680         unsigned long rx_packets_total = 0, rx_bytes_total = 0;
681         unsigned long tx_packets_total = 0, tx_bytes_total = 0;
682         unsigned long tx_packets_err_total = 0;
683         const struct pmd_internals *internal = dev->data->dev_private;
684
685         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
686                         i < dev->data->nb_rx_queues; i++) {
687                 stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
688                 stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
689                 rx_packets_total += stats->q_ipackets[i];
690                 rx_bytes_total += stats->q_ibytes[i];
691         }
692
693         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
694                         i < dev->data->nb_tx_queues; i++) {
695                 stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
696                 stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
697                 tx_packets_total += stats->q_opackets[i];
698                 tx_bytes_total += stats->q_obytes[i];
699                 tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
700         }
701
702         stats->ipackets = rx_packets_total;
703         stats->ibytes = rx_bytes_total;
704         stats->opackets = tx_packets_total;
705         stats->obytes = tx_bytes_total;
706         stats->oerrors = tx_packets_err_total;
707
708         return 0;
709 }
710
711 static int
712 eth_stats_reset(struct rte_eth_dev *dev)
713 {
714         unsigned int i;
715         struct pmd_internals *internal = dev->data->dev_private;
716
717         for (i = 0; i < dev->data->nb_rx_queues; i++) {
718                 internal->rx_queue[i].rx_stat.pkts = 0;
719                 internal->rx_queue[i].rx_stat.bytes = 0;
720         }
721
722         for (i = 0; i < dev->data->nb_tx_queues; i++) {
723                 internal->tx_queue[i].tx_stat.pkts = 0;
724                 internal->tx_queue[i].tx_stat.bytes = 0;
725                 internal->tx_queue[i].tx_stat.err_pkts = 0;
726         }
727
728         return 0;
729 }
730
731 static int
732 eth_dev_close(struct rte_eth_dev *dev)
733 {
734         unsigned int i;
735         struct pmd_internals *internals = dev->data->dev_private;
736
737         PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
738                         rte_socket_id());
739
740         rte_free(dev->process_private);
741
742         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
743                 return 0;
744
745         /* Device wide flag, but cleanup must be performed per queue. */
746         if (internals->infinite_rx) {
747                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
748                         struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];
749                         struct rte_mbuf *pcap_buf;
750
751                         while (!rte_ring_dequeue(pcap_q->pkts,
752                                         (void **)&pcap_buf))
753                                 rte_pktmbuf_free(pcap_buf);
754
755                         rte_ring_free(pcap_q->pkts);
756                 }
757         }
758
759         if (internals->phy_mac == 0)
760                 /* not dynamically allocated, must not be freed */
761                 dev->data->mac_addrs = NULL;
762
763         return 0;
764 }
765
766 static void
767 eth_queue_release(void *q __rte_unused)
768 {
769 }
770
771 static int
772 eth_link_update(struct rte_eth_dev *dev __rte_unused,
773                 int wait_to_complete __rte_unused)
774 {
775         return 0;
776 }
777
778 static int
779 eth_rx_queue_setup(struct rte_eth_dev *dev,
780                 uint16_t rx_queue_id,
781                 uint16_t nb_rx_desc __rte_unused,
782                 unsigned int socket_id __rte_unused,
783                 const struct rte_eth_rxconf *rx_conf __rte_unused,
784                 struct rte_mempool *mb_pool)
785 {
786         struct pmd_internals *internals = dev->data->dev_private;
787         struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
788
789         pcap_q->mb_pool = mb_pool;
790         pcap_q->port_id = dev->data->port_id;
791         pcap_q->queue_id = rx_queue_id;
792         dev->data->rx_queues[rx_queue_id] = pcap_q;
793
794         if (internals->infinite_rx) {
795                 struct pmd_process_private *pp;
796                 char ring_name[NAME_MAX];
797                 static uint32_t ring_number;
798                 uint64_t pcap_pkt_count = 0;
799                 struct rte_mbuf *bufs[1];
800                 pcap_t **pcap;
801
802                 pp = rte_eth_devices[pcap_q->port_id].process_private;
803                 pcap = &pp->rx_pcap[pcap_q->queue_id];
804
805                 if (unlikely(*pcap == NULL))
806                         return -ENOENT;
807
808                 pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);
809
810                 snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,
811                                 ring_number);
812
813                 pcap_q->pkts = rte_ring_create(ring_name,
814                                 rte_align64pow2(pcap_pkt_count + 1), 0,
815                                 RING_F_SP_ENQ | RING_F_SC_DEQ);
816                 ring_number++;
817                 if (!pcap_q->pkts)
818                         return -ENOENT;
819
820                 /* Fill ring with packets from PCAP file one by one. */
821                 while (eth_pcap_rx(pcap_q, bufs, 1)) {
822                         /* Check for multiseg mbufs. */
823                         if (bufs[0]->nb_segs != 1) {
824                                 rte_pktmbuf_free(*bufs);
825
826                                 while (!rte_ring_dequeue(pcap_q->pkts,
827                                                 (void **)bufs))
828                                         rte_pktmbuf_free(*bufs);
829
830                                 rte_ring_free(pcap_q->pkts);
831                                 PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx "
832                                                 "mode.");
833                                 return -EINVAL;
834                         }
835
836                         rte_ring_enqueue_bulk(pcap_q->pkts,
837                                         (void * const *)bufs, 1, NULL);
838                 }
839                 /*
840                  * Reset the stats for this queue since eth_pcap_rx calls above
841                  * didn't result in the application receiving packets.
842                  */
843                 pcap_q->rx_stat.pkts = 0;
844                 pcap_q->rx_stat.bytes = 0;
845         }
846
847         return 0;
848 }
849
850 static int
851 eth_tx_queue_setup(struct rte_eth_dev *dev,
852                 uint16_t tx_queue_id,
853                 uint16_t nb_tx_desc __rte_unused,
854                 unsigned int socket_id __rte_unused,
855                 const struct rte_eth_txconf *tx_conf __rte_unused)
856 {
857         struct pmd_internals *internals = dev->data->dev_private;
858         struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
859
860         pcap_q->port_id = dev->data->port_id;
861         pcap_q->queue_id = tx_queue_id;
862         dev->data->tx_queues[tx_queue_id] = pcap_q;
863
864         return 0;
865 }
866
867 static int
868 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
869 {
870         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
871
872         return 0;
873 }
874
875 static int
876 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
877 {
878         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
879
880         return 0;
881 }
882
883 static int
884 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
885 {
886         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
887
888         return 0;
889 }
890
891 static int
892 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
893 {
894         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
895
896         return 0;
897 }
898
899 static const struct eth_dev_ops ops = {
900         .dev_start = eth_dev_start,
901         .dev_stop = eth_dev_stop,
902         .dev_close = eth_dev_close,
903         .dev_configure = eth_dev_configure,
904         .dev_infos_get = eth_dev_info,
905         .rx_queue_setup = eth_rx_queue_setup,
906         .tx_queue_setup = eth_tx_queue_setup,
907         .rx_queue_start = eth_rx_queue_start,
908         .tx_queue_start = eth_tx_queue_start,
909         .rx_queue_stop = eth_rx_queue_stop,
910         .tx_queue_stop = eth_tx_queue_stop,
911         .rx_queue_release = eth_queue_release,
912         .tx_queue_release = eth_queue_release,
913         .link_update = eth_link_update,
914         .stats_get = eth_stats_get,
915         .stats_reset = eth_stats_reset,
916 };
917
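/*
 * Append one queue descriptor (pcap/dumper handle plus devarg name and
 * type) to the parsed devargs, failing once the per-port queue limit
 * (RTE_PMD_PCAP_MAX_QUEUES) is reached.
 */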
918 static int
919 add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
920                 pcap_t *pcap, pcap_dumper_t *dumper)
921 {
922         if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
923                 return -1;
924         if (pcap)
925                 pmd->queue[pmd->num_of_queue].pcap = pcap;
926         if (dumper)
927                 pmd->queue[pmd->num_of_queue].dumper = dumper;
928         pmd->queue[pmd->num_of_queue].name = name;
929         pmd->queue[pmd->num_of_queue].type = type;
930         pmd->num_of_queue++;
931         return 0;
932 }
933
934 /*
935  * Function handler that opens the pcap file for reading and stores a
936  * reference to it for later use.
937  */
938 static int
939 open_rx_pcap(const char *key, const char *value, void *extra_args)
940 {
941         const char *pcap_filename = value;
942         struct pmd_devargs *rx = extra_args;
943         pcap_t *pcap = NULL;
944
945         if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
946                 return -1;
947
948         if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
949                 pcap_close(pcap);
950                 return -1;
951         }
952
953         return 0;
954 }
955
956 /*
957  * Opens a pcap file for writing and stores a reference to it
958  * for later use.
959  */
960 static int
961 open_tx_pcap(const char *key, const char *value, void *extra_args)
962 {
963         const char *pcap_filename = value;
964         struct pmd_devargs *dumpers = extra_args;
965         pcap_dumper_t *dumper;
966
967         if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
968                 return -1;
969
970         if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
971                 pcap_dump_close(dumper);
972                 return -1;
973         }
974
975         return 0;
976 }
977
978 /*
979  * Opens an interface for reading and writing
980  */
981 static inline int
982 open_rx_tx_iface(const char *key, const char *value, void *extra_args)
983 {
984         const char *iface = value;
985         struct pmd_devargs *tx = extra_args;
986         pcap_t *pcap = NULL;
987
988         if (open_single_iface(iface, &pcap) < 0)
989                 return -1;
990
991         tx->queue[0].pcap = pcap;
992         tx->queue[0].name = iface;
993         tx->queue[0].type = key;
994
995         return 0;
996 }
997
998 static inline int
999 set_iface_direction(const char *iface, pcap_t *pcap,
1000                 pcap_direction_t direction)
1001 {
1002         const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
1003         if (pcap_setdirection(pcap, direction) < 0) {
1004                 PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
1005                                 iface, direction_str, pcap_geterr(pcap));
1006                 return -1;
1007         }
1008         PMD_LOG(INFO, "Setting %s pcap direction %s\n",
1009                         iface, direction_str);
1010         return 0;
1011 }
1012
1013 static inline int
1014 open_iface(const char *key, const char *value, void *extra_args)
1015 {
1016         const char *iface = value;
1017         struct pmd_devargs *pmd = extra_args;
1018         pcap_t *pcap = NULL;
1019
1020         if (open_single_iface(iface, &pcap) < 0)
1021                 return -1;
1022         if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
1023                 pcap_close(pcap);
1024                 return -1;
1025         }
1026
1027         return 0;
1028 }
1029
1030 /*
1031  * Opens a NIC for reading packets from it
1032  */
1033 static inline int
1034 open_rx_iface(const char *key, const char *value, void *extra_args)
1035 {
1036         int ret = open_iface(key, value, extra_args);
1037         if (ret < 0)
1038                 return ret;
1039         if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
1040                 struct pmd_devargs *pmd = extra_args;
1041                 unsigned int qid = pmd->num_of_queue - 1;
1042
1043                 set_iface_direction(pmd->queue[qid].name,
1044                                 pmd->queue[qid].pcap,
1045                                 PCAP_D_IN);
1046         }
1047
1048         return 0;
1049 }
1050
1051 static inline int
1052 rx_iface_args_process(const char *key, const char *value, void *extra_args)
1053 {
1054         if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
1055                         strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
1056                 return open_rx_iface(key, value, extra_args);
1057
1058         return 0;
1059 }
1060
1061 /*
1062  * Opens a NIC for writing packets to it
1063  */
1064 static int
1065 open_tx_iface(const char *key, const char *value, void *extra_args)
1066 {
1067         return open_iface(key, value, extra_args);
1068 }
1069
1070 static int
1071 select_phy_mac(const char *key __rte_unused, const char *value,
1072                 void *extra_args)
1073 {
1074         if (extra_args) {
1075                 const int phy_mac = atoi(value);
1076                 int *enable_phy_mac = extra_args;
1077
1078                 if (phy_mac)
1079                         *enable_phy_mac = 1;
1080         }
1081         return 0;
1082 }
1083
1084 static int
1085 get_infinite_rx_arg(const char *key __rte_unused,
1086                 const char *value, void *extra_args)
1087 {
1088         if (extra_args) {
1089                 const int infinite_rx = atoi(value);
1090                 int *enable_infinite_rx = extra_args;
1091
1092                 if (infinite_rx > 0)
1093                         *enable_infinite_rx = 1;
1094         }
1095         return 0;
1096 }
1097
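/*
 * Allocate the ethdev together with its process-private area and shared
 * internals, assign the synthetic locally administered 02:70:63:61:70:xx
 * MAC address and fill in the default link and queue data.
 */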
1098 static int
1099 pmd_init_internals(struct rte_vdev_device *vdev,
1100                 const unsigned int nb_rx_queues,
1101                 const unsigned int nb_tx_queues,
1102                 struct pmd_internals **internals,
1103                 struct rte_eth_dev **eth_dev)
1104 {
1105         struct rte_eth_dev_data *data;
1106         struct pmd_process_private *pp;
1107         unsigned int numa_node = vdev->device.numa_node;
1108
1109         PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
1110                 numa_node);
1111
1112         pp = (struct pmd_process_private *)
1113                 rte_zmalloc(NULL, sizeof(struct pmd_process_private),
1114                                 RTE_CACHE_LINE_SIZE);
1115
1116         if (pp == NULL) {
1117                 PMD_LOG(ERR,
1118                         "Failed to allocate memory for process private");
1119                 return -1;
1120         }
1121
1122         /* reserve an ethdev entry */
1123         *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
1124         if (!(*eth_dev)) {
1125                 rte_free(pp);
1126                 return -1;
1127         }
1128         (*eth_dev)->process_private = pp;
1129         /* now put it all together
1130          * - store queue data in internals,
1131          * - store numa_node info in eth_dev
1132          * - point eth_dev_data to internals
1133          * - and point eth_dev structure to new eth_dev_data structure
1134          */
1135         *internals = (*eth_dev)->data->dev_private;
1136         /*
1137          * Interface MAC = 02:70:63:61:70:<iface_idx>
1138          * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
1139          * where the middle 4 characters are converted to hex.
1140          */
1141         (*internals)->eth_addr = (struct rte_ether_addr) {
1142                 .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
1143         };
1144         (*internals)->phy_mac = 0;
1145         data = (*eth_dev)->data;
1146         data->nb_rx_queues = (uint16_t)nb_rx_queues;
1147         data->nb_tx_queues = (uint16_t)nb_tx_queues;
1148         data->dev_link = pmd_link;
1149         data->mac_addrs = &(*internals)->eth_addr;
1150         data->promiscuous = 1;
1151         data->all_multicast = 1;
1152
1153         /*
1154          * NOTE: we'll replace the data element of the originally allocated
1155          * eth_dev so the rings are local per-process
1156          */
1157         (*eth_dev)->dev_ops = &ops;
1158
1159         strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
1160                         ETH_PCAP_ARG_MAXLEN);
1161
1162         return 0;
1163 }
1164
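/*
 * Replace the synthetic MAC address with the physical MAC of the underlying
 * interface: via the SIOCGIFHWADDR ioctl on Linux, or the NET_RT_IFLIST
 * sysctl on FreeBSD; other platforms return an error.
 */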
1165 static int
1166 eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
1167                 const unsigned int numa_node)
1168 {
1169 #if defined(RTE_EXEC_ENV_LINUX)
1170         void *mac_addrs;
1171         struct ifreq ifr;
1172         int if_fd = socket(AF_INET, SOCK_DGRAM, 0);
1173
1174         if (if_fd == -1)
1175                 return -1;
1176
1177         rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
1178         if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {
1179                 close(if_fd);
1180                 return -1;
1181         }
1182
1183         mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
1184         if (!mac_addrs) {
1185                 close(if_fd);
1186                 return -1;
1187         }
1188
1189         PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
1190         eth_dev->data->mac_addrs = mac_addrs;
1191         rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
1192                         ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1193
1194         close(if_fd);
1195
1196         return 0;
1197
1198 #elif defined(RTE_EXEC_ENV_FREEBSD)
1199         void *mac_addrs;
1200         struct if_msghdr *ifm;
1201         struct sockaddr_dl *sdl;
1202         int mib[6];
1203         size_t len = 0;
1204         char *buf;
1205
1206         mib[0] = CTL_NET;
1207         mib[1] = AF_ROUTE;
1208         mib[2] = 0;
1209         mib[3] = AF_LINK;
1210         mib[4] = NET_RT_IFLIST;
1211         mib[5] = if_nametoindex(if_name);
1212
1213         if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
1214                 return -1;
1215
1216         if (len == 0)
1217                 return -1;
1218
1219         buf = rte_malloc(NULL, len, 0);
1220         if (!buf)
1221                 return -1;
1222
1223         if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
1224                 rte_free(buf);
1225                 return -1;
1226         }
1227         ifm = (struct if_msghdr *)buf;
1228         sdl = (struct sockaddr_dl *)(ifm + 1);
1229
1230         mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
1231         if (!mac_addrs) {
1232                 rte_free(buf);
1233                 return -1;
1234         }
1235
1236         PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
1237         eth_dev->data->mac_addrs = mac_addrs;
1238         rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
1239                         LLADDR(sdl), RTE_ETHER_ADDR_LEN);
1240
1241         rte_free(buf);
1242
1243         return 0;
1244 #else
1245         return -1;
1246 #endif
1247 }
1248
1249 static int
1250 eth_from_pcaps_common(struct rte_vdev_device *vdev,
1251                 struct pmd_devargs_all *devargs_all,
1252                 struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
1253 {
1254         struct pmd_process_private *pp;
1255         struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
1256         struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
1257         const unsigned int nb_rx_queues = rx_queues->num_of_queue;
1258         const unsigned int nb_tx_queues = tx_queues->num_of_queue;
1259         unsigned int i;
1260
1261         if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
1262                         eth_dev) < 0)
1263                 return -1;
1264
1265         pp = (*eth_dev)->process_private;
1266         for (i = 0; i < nb_rx_queues; i++) {
1267                 struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
1268                 struct devargs_queue *queue = &rx_queues->queue[i];
1269
1270                 pp->rx_pcap[i] = queue->pcap;
1271                 strlcpy(rx->name, queue->name, sizeof(rx->name));
1272                 strlcpy(rx->type, queue->type, sizeof(rx->type));
1273         }
1274
1275         for (i = 0; i < nb_tx_queues; i++) {
1276                 struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
1277                 struct devargs_queue *queue = &tx_queues->queue[i];
1278
1279                 pp->tx_dumper[i] = queue->dumper;
1280                 pp->tx_pcap[i] = queue->pcap;
1281                 strlcpy(tx->name, queue->name, sizeof(tx->name));
1282                 strlcpy(tx->type, queue->type, sizeof(tx->type));
1283         }
1284
1285         return 0;
1286 }
1287
1288 static int
1289 eth_from_pcaps(struct rte_vdev_device *vdev,
1290                 struct pmd_devargs_all *devargs_all)
1291 {
1292         struct pmd_internals *internals = NULL;
1293         struct rte_eth_dev *eth_dev = NULL;
1294         struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
1295         int single_iface = devargs_all->single_iface;
1296         unsigned int infinite_rx = devargs_all->infinite_rx;
1297         int ret;
1298
1299         ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);
1300
1301         if (ret < 0)
1302                 return ret;
1303
1304         /* store whether we are using a single interface for rx/tx or not */
1305         internals->single_iface = single_iface;
1306
1307         if (single_iface) {
1308                 internals->if_index = if_nametoindex(rx_queues->queue[0].name);
1309
1310                 /* phy_mac arg is applied only if "iface" devarg is provided */
1311                 if (rx_queues->phy_mac) {
1312                         int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
1313                                         eth_dev, vdev->device.numa_node);
1314                         if (ret == 0)
1315                                 internals->phy_mac = 1;
1316                 }
1317         }
1318
1319         internals->infinite_rx = infinite_rx;
1320         /* Assign rx ops. */
1321         if (infinite_rx)
1322                 eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
1323         else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
1324                         single_iface)
1325                 eth_dev->rx_pkt_burst = eth_pcap_rx;
1326         else
1327                 eth_dev->rx_pkt_burst = eth_null_rx;
1328
1329         /* Assign tx ops. */
1330         if (devargs_all->is_tx_pcap)
1331                 eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
1332         else if (devargs_all->is_tx_iface || single_iface)
1333                 eth_dev->tx_pkt_burst = eth_pcap_tx;
1334         else
1335                 eth_dev->tx_pkt_burst = eth_tx_drop;
1336
1337         rte_eth_dev_probing_finish(eth_dev);
1338         return 0;
1339 }
1340
1341 static int
1342 pmd_pcap_probe(struct rte_vdev_device *dev)
1343 {
1344         const char *name;
1345         struct rte_kvargs *kvlist;
1346         struct pmd_devargs pcaps = {0};
1347         struct pmd_devargs dumpers = {0};
1348         struct rte_eth_dev *eth_dev =  NULL;
1349         struct pmd_internals *internal;
1350         int ret = 0;
1351
1352         struct pmd_devargs_all devargs_all = {
1353                 .single_iface = 0,
1354                 .is_tx_pcap = 0,
1355                 .is_tx_iface = 0,
1356                 .infinite_rx = 0,
1357         };
1358
1359         name = rte_vdev_device_name(dev);
1360         PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
1361
1362         gettimeofday(&start_time, NULL);
1363         start_cycles = rte_get_timer_cycles();
1364         hz = rte_get_timer_hz();
1365
1366         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1367                 eth_dev = rte_eth_dev_attach_secondary(name);
1368                 if (!eth_dev) {
1369                         PMD_LOG(ERR, "Failed to probe %s", name);
1370                         return -1;
1371                 }
1372
1373                 internal = eth_dev->data->dev_private;
1374
1375                 kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
1376                 if (kvlist == NULL)
1377                         return -1;
1378         } else {
1379                 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
1380                                 valid_arguments);
1381                 if (kvlist == NULL)
1382                         return -1;
1383         }
1384
1385         /*
1386          * If iface argument is passed we open the NICs and use them for
1387          * reading / writing
1388          */
1389         if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
1390
1391                 ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
1392                                 &open_rx_tx_iface, &pcaps);
1393                 if (ret < 0)
1394                         goto free_kvlist;
1395
1396                 dumpers.queue[0] = pcaps.queue[0];
1397
1398                 ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
1399                                 &select_phy_mac, &pcaps.phy_mac);
1400                 if (ret < 0)
1401                         goto free_kvlist;
1402
1403                 dumpers.phy_mac = pcaps.phy_mac;
1404
1405                 devargs_all.single_iface = 1;
1406                 pcaps.num_of_queue = 1;
1407                 dumpers.num_of_queue = 1;
1408
1409                 goto create_eth;
1410         }
1411
1412         /*
1413          * We check whether we want to open an RX stream from a real NIC,
1414          * a pcap file, or a dummy RX stream
1415          */
1416         devargs_all.is_rx_pcap =
1417                 rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
1418         devargs_all.is_rx_iface =
1419                 rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) ? 1 : 0;
1420         pcaps.num_of_queue = 0;
1421
1422         devargs_all.is_tx_pcap =
1423                 rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
1424         devargs_all.is_tx_iface =
1425                 rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
1426         dumpers.num_of_queue = 0;
1427
1428         if (devargs_all.is_rx_pcap) {
1429                 /*
1430                  * We check whether we want to infinitely rx the pcap file.
1431                  */
1432                 unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
1433                                 ETH_PCAP_INFINITE_RX_ARG);
1434
1435                 if (infinite_rx_arg_cnt == 1) {
1436                         ret = rte_kvargs_process(kvlist,
1437                                         ETH_PCAP_INFINITE_RX_ARG,
1438                                         &get_infinite_rx_arg,
1439                                         &devargs_all.infinite_rx);
1440                         if (ret < 0)
1441                                 goto free_kvlist;
1442                         PMD_LOG(INFO, "infinite_rx has been %s for %s",
1443                                         devargs_all.infinite_rx ? "enabled" : "disabled",
1444                                         name);
1445
1446                 } else if (infinite_rx_arg_cnt > 1) {
1447                         PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
1448                                         "argument has been provided more than once "
1449                                         "for %s", name);
1450                 }
1451
1452                 ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
1453                                 &open_rx_pcap, &pcaps);
1454         } else if (devargs_all.is_rx_iface) {
1455                 ret = rte_kvargs_process(kvlist, NULL,
1456                                 &rx_iface_args_process, &pcaps);
1457         } else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
1458                 unsigned int i;
1459
1460                 /* Count number of tx queue args passed before dummy rx queue
1461                  * creation so a dummy rx queue can be created for each tx queue
1462                  */
1463                 unsigned int num_tx_queues =
1464                         (rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
1465                         rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));
1466
1467                 PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");
1468
1469                 /* Creating a dummy rx queue for each tx queue passed */
1470                 for (i = 0; i < num_tx_queues; i++)
1471                         ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,
1472                                         NULL);
1473         } else {
1474                 PMD_LOG(ERR, "Error - No rx or tx queues provided");
1475                 ret = -ENOENT;
1476         }
1477         if (ret < 0)
1478                 goto free_kvlist;
1479
1480         /*
1481          * We check whether we want to open a TX stream to a real NIC,
1482          * a pcap file, or drop packets on tx
1483          */
1484         if (devargs_all.is_tx_pcap) {
1485                 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
1486                                 &open_tx_pcap, &dumpers);
1487         } else if (devargs_all.is_tx_iface) {
1488                 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
1489                                 &open_tx_iface, &dumpers);
1490         } else {
1491                 unsigned int i;
1492
1493                 PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");
1494
1495                 /* Add 1 dummy queue per rxq which counts and drops packets. */
1496                 for (i = 0; i < pcaps.num_of_queue; i++)
1497                         ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,
1498                                         NULL);
1499         }
1500
1501         if (ret < 0)
1502                 goto free_kvlist;
1503
1504 create_eth:
1505         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1506                 struct pmd_process_private *pp;
1507                 unsigned int i;
1508
1509                 internal = eth_dev->data->dev_private;
1510                 pp = (struct pmd_process_private *)
1511                         rte_zmalloc(NULL,
1512                                 sizeof(struct pmd_process_private),
1513                                 RTE_CACHE_LINE_SIZE);
1514
1515                 if (pp == NULL) {
1516                         PMD_LOG(ERR,
1517                                 "Failed to allocate memory for process private");
1518                         ret = -1;
1519                         goto free_kvlist;
1520                 }
1521
1522                 eth_dev->dev_ops = &ops;
1523                 eth_dev->device = &dev->device;
1524
1525                 /* setup process private */
1526                 for (i = 0; i < pcaps.num_of_queue; i++)
1527                         pp->rx_pcap[i] = pcaps.queue[i].pcap;
1528
1529                 for (i = 0; i < dumpers.num_of_queue; i++) {
1530                         pp->tx_dumper[i] = dumpers.queue[i].dumper;
1531                         pp->tx_pcap[i] = dumpers.queue[i].pcap;
1532                 }
1533
1534                 eth_dev->process_private = pp;
1535                 eth_dev->rx_pkt_burst = eth_pcap_rx;
1536                 if (devargs_all.is_tx_pcap)
1537                         eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
1538                 else
1539                         eth_dev->tx_pkt_burst = eth_pcap_tx;
1540
1541                 rte_eth_dev_probing_finish(eth_dev);
1542                 goto free_kvlist;
1543         }
1544
1545         devargs_all.rx_queues = pcaps;
1546         devargs_all.tx_queues = dumpers;
1547
1548         ret = eth_from_pcaps(dev, &devargs_all);
1549
1550 free_kvlist:
1551         rte_kvargs_free(kvlist);
1552
1553         return ret;
1554 }
1555
1556 static int
1557 pmd_pcap_remove(struct rte_vdev_device *dev)
1558 {
1559         struct rte_eth_dev *eth_dev = NULL;
1560
1561         if (!dev)
1562                 return -1;
1563
1564         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1565         if (eth_dev == NULL)
1566                 return 0; /* port already released */
1567
1568         eth_dev_close(eth_dev);
1569         rte_eth_dev_release_port(eth_dev);
1570
1571         return 0;
1572 }
1573
1574 static struct rte_vdev_driver pmd_pcap_drv = {
1575         .probe = pmd_pcap_probe,
1576         .remove = pmd_pcap_remove,
1577 };
1578
1579 RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
1580 RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
1581 RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
1582         ETH_PCAP_RX_PCAP_ARG "=<string> "
1583         ETH_PCAP_TX_PCAP_ARG "=<string> "
1584         ETH_PCAP_RX_IFACE_ARG "=<ifc> "
1585         ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
1586         ETH_PCAP_TX_IFACE_ARG "=<ifc> "
1587         ETH_PCAP_IFACE_ARG "=<ifc> "
1588         ETH_PCAP_PHY_MAC_ARG "=<int>"
1589         ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
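/*
 * Typical EAL --vdev strings accepted by this driver (file and interface
 * names below are illustrative examples):
 *   --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 *   --vdev 'net_pcap1,iface=eth0,phy_mac=1'
 *   --vdev 'net_pcap2,rx_pcap=input.pcap,infinite_rx=1'
 */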