net/pcap: improve Rx statistics
[dpdk.git] / drivers / net / pcap / pcap_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright(c) 2014 6WIND S.A.
4  * All rights reserved.
5  */
6
7 #include <time.h>
8
9 #include <pcap.h>
10
11 #include <rte_cycles.h>
12 #include <ethdev_driver.h>
13 #include <ethdev_vdev.h>
14 #include <rte_kvargs.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_mbuf_dyn.h>
18 #include <rte_bus_vdev.h>
19 #include <rte_os_shim.h>
20
21 #include "pcap_osdep.h"
22
23 #define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
24 #define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
25 #define RTE_ETH_PCAP_PROMISC 1
26 #define RTE_ETH_PCAP_TIMEOUT -1
27
28 #define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
29 #define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
30 #define ETH_PCAP_RX_IFACE_ARG "rx_iface"
31 #define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
32 #define ETH_PCAP_TX_IFACE_ARG "tx_iface"
33 #define ETH_PCAP_IFACE_ARG    "iface"
34 #define ETH_PCAP_PHY_MAC_ARG  "phy_mac"
35 #define ETH_PCAP_INFINITE_RX_ARG  "infinite_rx"
36
37 #define ETH_PCAP_ARG_MAXLEN     64
38
39 #define RTE_PMD_PCAP_MAX_QUEUES 16
40
41 static char errbuf[PCAP_ERRBUF_SIZE];
42 static struct timespec start_time;
43 static uint64_t start_cycles;
44 static uint64_t hz;
45 static uint8_t iface_idx;
46
47 static uint64_t timestamp_rx_dynflag;
48 static int timestamp_dynfield_offset = -1;
49
50 struct queue_stat {
51         volatile unsigned long pkts;
52         volatile unsigned long bytes;
53         volatile unsigned long err_pkts;
54         volatile unsigned long rx_nombuf;
55 };
56
57 struct queue_missed_stat {
58         /* last value retrieved from pcap */
59         unsigned int pcap;
60         /* stores values lost by pcap stop or rollover */
61         unsigned long mnemonic;
62         /* value on last reset */
63         unsigned long reset;
64 };
65
66 struct pcap_rx_queue {
67         uint16_t port_id;
68         uint16_t queue_id;
69         struct rte_mempool *mb_pool;
70         struct queue_stat rx_stat;
71         struct queue_missed_stat missed_stat;
72         char name[PATH_MAX];
73         char type[ETH_PCAP_ARG_MAXLEN];
74
75         /* Contains pre-generated packets to be looped through */
76         struct rte_ring *pkts;
77 };
78
79 struct pcap_tx_queue {
80         uint16_t port_id;
81         uint16_t queue_id;
82         struct queue_stat tx_stat;
83         char name[PATH_MAX];
84         char type[ETH_PCAP_ARG_MAXLEN];
85 };
86
87 struct pmd_internals {
88         struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
89         struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
90         char devargs[ETH_PCAP_ARG_MAXLEN];
91         struct rte_ether_addr eth_addr;
92         int if_index;
93         int single_iface;
94         int phy_mac;
95         unsigned int infinite_rx;
96 };
97
98 struct pmd_process_private {
99         pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
100         pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
101         pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
102 };
103
104 struct pmd_devargs {
105         unsigned int num_of_queue;
106         struct devargs_queue {
107                 pcap_dumper_t *dumper;
108                 pcap_t *pcap;
109                 const char *name;
110                 const char *type;
111         } queue[RTE_PMD_PCAP_MAX_QUEUES];
112         int phy_mac;
113 };
114
115 struct pmd_devargs_all {
116         struct pmd_devargs rx_queues;
117         struct pmd_devargs tx_queues;
118         int single_iface;
119         unsigned int is_tx_pcap;
120         unsigned int is_tx_iface;
121         unsigned int is_rx_pcap;
122         unsigned int is_rx_iface;
123         unsigned int infinite_rx;
124 };
125
126 static const char *valid_arguments[] = {
127         ETH_PCAP_RX_PCAP_ARG,
128         ETH_PCAP_TX_PCAP_ARG,
129         ETH_PCAP_RX_IFACE_ARG,
130         ETH_PCAP_RX_IFACE_IN_ARG,
131         ETH_PCAP_TX_IFACE_ARG,
132         ETH_PCAP_IFACE_ARG,
133         ETH_PCAP_PHY_MAC_ARG,
134         ETH_PCAP_INFINITE_RX_ARG,
135         NULL
136 };
137
138 static struct rte_eth_link pmd_link = {
139                 .link_speed = ETH_SPEED_NUM_10G,
140                 .link_duplex = ETH_LINK_FULL_DUPLEX,
141                 .link_status = ETH_LINK_DOWN,
142                 .link_autoneg = ETH_LINK_FIXED,
143 };
144
145 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
146
147 static struct queue_missed_stat*
148 queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
149 {
150         struct pmd_internals *internals = dev->data->dev_private;
151         struct queue_missed_stat *missed_stat =
152                         &internals->rx_queue[qid].missed_stat;
153         const struct pmd_process_private *pp = dev->process_private;
154         pcap_t *pcap = pp->rx_pcap[qid];
155         struct pcap_stat stat;
156
157         if (!pcap || (pcap_stats(pcap, &stat) != 0))
158                 return missed_stat;
159
160         /* rollover check - best effort fixup assuming single rollover */
161         if (stat.ps_drop < missed_stat->pcap)
162                 missed_stat->mnemonic += UINT_MAX;
163         missed_stat->pcap = stat.ps_drop;
164
165         return missed_stat;
166 }
167
168 static void
169 queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
170 {
171         struct queue_missed_stat *missed_stat =
172                         queue_missed_stat_update(dev, qid);
173
174         missed_stat->mnemonic += missed_stat->pcap;
175         missed_stat->pcap = 0;
176 }
177
178 static void
179 queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
180 {
181         struct queue_missed_stat *missed_stat =
182                         queue_missed_stat_update(dev, qid);
183
184         missed_stat->reset = missed_stat->pcap;
185         missed_stat->mnemonic = 0;
186 }
187
188 static unsigned long
189 queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
190 {
191         const struct queue_missed_stat *missed_stat =
192                         queue_missed_stat_update(dev, qid);
193
194         return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
195 }
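
/*
 * Illustrative accounting sketch (hypothetical numbers): pcap reports
 * ps_drop as an unsigned int, so the counter wraps at UINT_MAX. If the
 * last retrieved value was 10 and pcap now reports 3,
 * queue_missed_stat_update() treats it as a single rollover, folds
 * UINT_MAX into 'mnemonic' and stores 3 in 'pcap'. A stats reset
 * snapshots the current pcap value into 'reset' and clears 'mnemonic',
 * so the getter above reports pcap + mnemonic - reset, i.e. the drops
 * observed since the last reset.
 */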
196
197 static int
198 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
199                 const u_char *data, uint16_t data_len)
200 {
201         /* Copy the first segment. */
202         uint16_t len = rte_pktmbuf_tailroom(mbuf);
203         struct rte_mbuf *m = mbuf;
204
205         rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
206         data_len -= len;
207         data += len;
208
209         while (data_len > 0) {
210                 /* Allocate next mbuf and point to that. */
211                 m->next = rte_pktmbuf_alloc(mb_pool);
212
213                 if (unlikely(!m->next))
214                         return -1;
215
216                 m = m->next;
217
218                 /* Headroom is not needed in chained mbufs. */
219                 rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
220                 m->pkt_len = 0;
221                 m->data_len = 0;
222
223                 /* Copy next segment. */
224                 len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
225                 rte_memcpy(rte_pktmbuf_append(m, len), data, len);
226
227                 mbuf->nb_segs++;
228                 data_len -= len;
229                 data += len;
230         }
231
232         return mbuf->nb_segs;
233 }
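
/*
 * Segment-count sketch (hypothetical mempool geometry): with a
 * 2048-byte data room and 128 bytes of headroom, the first segment
 * above holds 1920 bytes, while each chained segment, its headroom
 * reclaimed via rte_pktmbuf_prepend(), holds the full 2048. A
 * 9000-byte capture then needs 1 + ceil((9000 - 1920) / 2048) = 5
 * segments from the pool.
 */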
234
235 static uint16_t
236 eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
237 {
238         int i;
239         struct pcap_rx_queue *pcap_q = queue;
240         uint32_t rx_bytes = 0;
241
242         if (unlikely(nb_pkts == 0))
243                 return 0;
244
245         if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
246                 return 0;
247
248         for (i = 0; i < nb_pkts; i++) {
249                 struct rte_mbuf *pcap_buf;
250                 int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
251                 if (err)
252                         return i;
253
254                 rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
255                                 rte_pktmbuf_mtod(pcap_buf, void *),
256                                 pcap_buf->data_len);
257                 bufs[i]->data_len = pcap_buf->data_len;
258                 bufs[i]->pkt_len = pcap_buf->pkt_len;
259                 bufs[i]->port = pcap_q->port_id;
260                 rx_bytes += pcap_buf->data_len;
261
262                 /* Enqueue packet back on ring to allow infinite rx. */
263                 rte_ring_enqueue(pcap_q->pkts, pcap_buf);
264         }
265
266         pcap_q->rx_stat.pkts += i;
267         pcap_q->rx_stat.bytes += rx_bytes;
268
269         return i;
270 }
271
272 static uint16_t
273 eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
274 {
275         unsigned int i;
276         struct pcap_pkthdr header;
277         struct pmd_process_private *pp;
278         const u_char *packet;
279         struct rte_mbuf *mbuf;
280         struct pcap_rx_queue *pcap_q = queue;
281         uint16_t num_rx = 0;
282         uint32_t rx_bytes = 0;
283         pcap_t *pcap;
284
285         pp = rte_eth_devices[pcap_q->port_id].process_private;
286         pcap = pp->rx_pcap[pcap_q->queue_id];
287
288         if (unlikely(pcap == NULL || nb_pkts == 0))
289                 return 0;
290
291         /* Reads the given number of packets from the pcap file one by one
292          * and copies each packet's data into a newly allocated mbuf that is returned.
293          */
294         for (i = 0; i < nb_pkts; i++) {
295                 /* Get the next PCAP packet */
296                 packet = pcap_next(pcap, &header);
297                 if (unlikely(packet == NULL))
298                         break;
299
300                 mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
301                 if (unlikely(mbuf == NULL)) {
302                         pcap_q->rx_stat.rx_nombuf++;
303                         break;
304                 }
305
306                 if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
307                         /* pcap packet will fit in the mbuf, can copy it */
308                         rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
309                                         header.caplen);
310                         mbuf->data_len = (uint16_t)header.caplen;
311                 } else {
312                         /* Try to read a jumbo frame into multiple mbufs. */
313                         if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
314                                                        mbuf,
315                                                        packet,
316                                                        header.caplen) == -1)) {
317                                 pcap_q->rx_stat.err_pkts++;
318                                 rte_pktmbuf_free(mbuf);
319                                 break;
320                         }
321                 }
322
323                 mbuf->pkt_len = (uint16_t)header.caplen;
324                 *RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset,
325                         rte_mbuf_timestamp_t *) =
326                                 (uint64_t)header.ts.tv_sec * 1000000 +
327                                 header.ts.tv_usec;
328                 mbuf->ol_flags |= timestamp_rx_dynflag;
329                 mbuf->port = pcap_q->port_id;
330                 bufs[num_rx] = mbuf;
331                 num_rx++;
332                 rx_bytes += header.caplen;
333         }
334         pcap_q->rx_stat.pkts += num_rx;
335         pcap_q->rx_stat.bytes += rx_bytes;
336
337         return num_rx;
338 }
339
340 static uint16_t
341 eth_null_rx(void *queue __rte_unused,
342                 struct rte_mbuf **bufs __rte_unused,
343                 uint16_t nb_pkts __rte_unused)
344 {
345         return 0;
346 }
347
348 #define NSEC_PER_SEC    1000000000L
349
350 /*
351  * This function stores nanoseconds in the `tv_usec` field of `struct timeval`,
352  * because `ts` goes directly into the nanosecond-precision dump.
353  */
354 static inline void
355 calculate_timestamp(struct timeval *ts) {
356         uint64_t cycles;
357         struct timespec cur_time;
358
359         cycles = rte_get_timer_cycles() - start_cycles;
360         cur_time.tv_sec = cycles / hz;
361         cur_time.tv_nsec = (cycles % hz) * NSEC_PER_SEC / hz;
362
363         ts->tv_sec = start_time.tv_sec + cur_time.tv_sec;
364         ts->tv_usec = start_time.tv_nsec + cur_time.tv_nsec;
365         if (ts->tv_usec >= NSEC_PER_SEC) {
366                 ts->tv_usec -= NSEC_PER_SEC;
367                 ts->tv_sec += 1;
368         }
369 }
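
/*
 * Worked example (hypothetical numbers): with hz = 2000000000 (2 GHz)
 * and cycles = 2500000000 elapsed, cur_time becomes 1 s plus
 * (500000000 * NSEC_PER_SEC) / hz = 250000000 ns. Because
 * cycles % hz < hz, the intermediate product fits in uint64_t for
 * typical timer frequencies of a few GHz. The sum with start_time then
 * carries at most once into tv_sec.
 */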
370
371 /*
372  * Callback to handle writing packets to a pcap file.
373  */
374 static uint16_t
375 eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
376 {
377         unsigned int i;
378         struct rte_mbuf *mbuf;
379         struct pmd_process_private *pp;
380         struct pcap_tx_queue *dumper_q = queue;
381         uint16_t num_tx = 0;
382         uint32_t tx_bytes = 0;
383         struct pcap_pkthdr header;
384         pcap_dumper_t *dumper;
385         unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
386         size_t len, caplen;
387
388         pp = rte_eth_devices[dumper_q->port_id].process_private;
389         dumper = pp->tx_dumper[dumper_q->queue_id];
390
391         if (dumper == NULL || nb_pkts == 0)
392                 return 0;
393
394         /* writes the nb_pkts packets to the previously opened pcap file
395          * dumper */
396         for (i = 0; i < nb_pkts; i++) {
397                 mbuf = bufs[i];
398                 len = caplen = rte_pktmbuf_pkt_len(mbuf);
399                 if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
400                                 len > sizeof(temp_data))) {
401                         caplen = sizeof(temp_data);
402                 }
403
404                 calculate_timestamp(&header.ts);
405                 header.len = len;
406                 header.caplen = caplen;
407                 /* rte_pktmbuf_read() returns a pointer to the data directly
408                  * in the mbuf (when the mbuf is contiguous) or, otherwise,
409                  * a pointer to temp_data after copying into it.
410                  */
411                 pcap_dump((u_char *)dumper, &header,
412                         rte_pktmbuf_read(mbuf, 0, caplen, temp_data));
413
414                 num_tx++;
415                 tx_bytes += caplen;
416                 rte_pktmbuf_free(mbuf);
417         }
418
419         /*
420          * Since there's no place to hook a callback when the forwarding
421          * process stops and to make sure the pcap file is actually written,
422          * we flush the pcap dumper within each burst.
423          */
424         pcap_dump_flush(dumper);
425         dumper_q->tx_stat.pkts += num_tx;
426         dumper_q->tx_stat.bytes += tx_bytes;
427         dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
428
429         return nb_pkts;
430 }
431
432 /*
433  * Callback to handle dropping packets in the infinite rx case.
434  */
435 static uint16_t
436 eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
437 {
438         unsigned int i;
439         uint32_t tx_bytes = 0;
440         struct pcap_tx_queue *tx_queue = queue;
441
442         if (unlikely(nb_pkts == 0))
443                 return 0;
444
445         for (i = 0; i < nb_pkts; i++) {
446                 tx_bytes += bufs[i]->pkt_len;
447                 rte_pktmbuf_free(bufs[i]);
448         }
449
450         tx_queue->tx_stat.pkts += nb_pkts;
451         tx_queue->tx_stat.bytes += tx_bytes;
452
453         return i;
454 }
455
456 /*
457  * Callback to handle sending packets through a real NIC.
458  */
459 static uint16_t
460 eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
461 {
462         unsigned int i;
463         int ret;
464         struct rte_mbuf *mbuf;
465         struct pmd_process_private *pp;
466         struct pcap_tx_queue *tx_queue = queue;
467         uint16_t num_tx = 0;
468         uint32_t tx_bytes = 0;
469         pcap_t *pcap;
470         unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
471         size_t len;
472
473         pp = rte_eth_devices[tx_queue->port_id].process_private;
474         pcap = pp->tx_pcap[tx_queue->queue_id];
475
476         if (unlikely(nb_pkts == 0 || pcap == NULL))
477                 return 0;
478
479         for (i = 0; i < nb_pkts; i++) {
480                 mbuf = bufs[i];
481                 len = rte_pktmbuf_pkt_len(mbuf);
482                 if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
483                                 len > sizeof(temp_data))) {
484                         PMD_LOG(ERR,
485                                 "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
486                                 len, sizeof(temp_data));
487                         rte_pktmbuf_free(mbuf);
488                         continue;
489                 }
490
491                 /* rte_pktmbuf_read() returns a pointer to the data directly
492                  * in the mbuf (when the mbuf is contiguous) or, otherwise,
493                  * a pointer to temp_data after copying into it.
494                  */
495                 ret = pcap_sendpacket(pcap,
496                         rte_pktmbuf_read(mbuf, 0, len, temp_data), len);
497                 if (unlikely(ret != 0))
498                         break;
499                 num_tx++;
500                 tx_bytes += len;
501                 rte_pktmbuf_free(mbuf);
502         }
503
504         tx_queue->tx_stat.pkts += num_tx;
505         tx_queue->tx_stat.bytes += tx_bytes;
506         tx_queue->tx_stat.err_pkts += i - num_tx;
507
508         return i;
509 }
510
511 /*
512  * pcap_open_live wrapper function
513  */
514 static inline int
515 open_iface_live(const char *iface, pcap_t **pcap) {
516         *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
517                         RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
518
519         if (*pcap == NULL) {
520                 PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
521                 return -1;
522         }
523
524         return 0;
525 }
526
527 static int
528 open_single_iface(const char *iface, pcap_t **pcap)
529 {
530         if (open_iface_live(iface, pcap) < 0) {
531                 PMD_LOG(ERR, "Couldn't open interface %s", iface);
532                 return -1;
533         }
534
535         return 0;
536 }
537
538 static int
539 open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
540 {
541         pcap_t *tx_pcap;
542
543         /*
544          * We need to create a dummy empty pcap_t to use
545          * with pcap_dump_open(). We create a big enough
546          * Ethernet pcap holder.
547          */
548         tx_pcap = pcap_open_dead_with_tstamp_precision(DLT_EN10MB,
549                         RTE_ETH_PCAP_SNAPSHOT_LEN, PCAP_TSTAMP_PRECISION_NANO);
550         if (tx_pcap == NULL) {
551                 PMD_LOG(ERR, "Couldn't create dead pcap");
552                 return -1;
553         }
554
555         /* The dumper is created using the previous pcap_t reference */
556         *dumper = pcap_dump_open(tx_pcap, pcap_filename);
557         if (*dumper == NULL) {
558                 pcap_close(tx_pcap);
559                 PMD_LOG(ERR, "Couldn't open %s for writing.",
560                         pcap_filename);
561                 return -1;
562         }
563
564         pcap_close(tx_pcap);
565         return 0;
566 }
567
568 static int
569 open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
570 {
571         *pcap = pcap_open_offline(pcap_filename, errbuf);
572         if (*pcap == NULL) {
573                 PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
574                         errbuf);
575                 return -1;
576         }
577
578         return 0;
579 }
580
581 static uint64_t
582 count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
583 {
584         const u_char *packet;
585         struct pcap_pkthdr header;
586         uint64_t pcap_pkt_count = 0;
587
588         while ((packet = pcap_next(*pcap, &header)))
589                 pcap_pkt_count++;
590
591         /* The pcap is reopened so it can be used as normal later. */
592         pcap_close(*pcap);
593         *pcap = NULL;
594         open_single_rx_pcap(pcap_q->name, pcap);
595
596         return pcap_pkt_count;
597 }
598
599 static int
600 eth_dev_start(struct rte_eth_dev *dev)
601 {
602         unsigned int i;
603         struct pmd_internals *internals = dev->data->dev_private;
604         struct pmd_process_private *pp = dev->process_private;
605         struct pcap_tx_queue *tx;
606         struct pcap_rx_queue *rx;
607
608         /* Special iface case. Single pcap is open and shared between tx/rx. */
609         if (internals->single_iface) {
610                 tx = &internals->tx_queue[0];
611                 rx = &internals->rx_queue[0];
612
613                 if (!pp->tx_pcap[0] &&
614                         strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
615                         if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
616                                 return -1;
617                         pp->rx_pcap[0] = pp->tx_pcap[0];
618                 }
619
620                 goto status_up;
621         }
622
623         /* If not open already, open tx pcaps/dumpers */
624         for (i = 0; i < dev->data->nb_tx_queues; i++) {
625                 tx = &internals->tx_queue[i];
626
627                 if (!pp->tx_dumper[i] &&
628                                 strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
629                         if (open_single_tx_pcap(tx->name,
630                                 &pp->tx_dumper[i]) < 0)
631                                 return -1;
632                 } else if (!pp->tx_pcap[i] &&
633                                 strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
634                         if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
635                                 return -1;
636                 }
637         }
638
639         /* If not open already, open rx pcaps */
640         for (i = 0; i < dev->data->nb_rx_queues; i++) {
641                 rx = &internals->rx_queue[i];
642
643                 if (pp->rx_pcap[i] != NULL)
644                         continue;
645
646                 if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
647                         if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
648                                 return -1;
649                 } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
650                         if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
651                                 return -1;
652                 }
653         }
654
655 status_up:
656         for (i = 0; i < dev->data->nb_rx_queues; i++)
657                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
658
659         for (i = 0; i < dev->data->nb_tx_queues; i++)
660                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
661
662         dev->data->dev_link.link_status = ETH_LINK_UP;
663
664         return 0;
665 }
666
667 /*
668  * This function gets called when the current port gets stopped.
669  * It is the only place for us to close all the tx stream dumpers.
670  * If not called, the dumpers are flushed within each tx burst.
671  */
672 static int
673 eth_dev_stop(struct rte_eth_dev *dev)
674 {
675         unsigned int i;
676         struct pmd_internals *internals = dev->data->dev_private;
677         struct pmd_process_private *pp = dev->process_private;
678
679         /* Special iface case. Single pcap is open and shared between tx/rx. */
680         if (internals->single_iface) {
681                 queue_missed_stat_on_stop_update(dev, 0);
682                 if (pp->tx_pcap[0] != NULL) {
683                         pcap_close(pp->tx_pcap[0]);
684                         pp->tx_pcap[0] = NULL;
685                         pp->rx_pcap[0] = NULL;
686                 }
687                 goto status_down;
688         }
689
690         for (i = 0; i < dev->data->nb_tx_queues; i++) {
691                 if (pp->tx_dumper[i] != NULL) {
692                         pcap_dump_close(pp->tx_dumper[i]);
693                         pp->tx_dumper[i] = NULL;
694                 }
695
696                 if (pp->tx_pcap[i] != NULL) {
697                         pcap_close(pp->tx_pcap[i]);
698                         pp->tx_pcap[i] = NULL;
699                 }
700         }
701
702         for (i = 0; i < dev->data->nb_rx_queues; i++) {
703                 if (pp->rx_pcap[i] != NULL) {
704                         queue_missed_stat_on_stop_update(dev, i);
705                         pcap_close(pp->rx_pcap[i]);
706                         pp->rx_pcap[i] = NULL;
707                 }
708         }
709
710 status_down:
711         for (i = 0; i < dev->data->nb_rx_queues; i++)
712                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
713
714         for (i = 0; i < dev->data->nb_tx_queues; i++)
715                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
716
717         dev->data->dev_link.link_status = ETH_LINK_DOWN;
718
719         return 0;
720 }
721
722 static int
723 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
724 {
725         return 0;
726 }
727
728 static int
729 eth_dev_info(struct rte_eth_dev *dev,
730                 struct rte_eth_dev_info *dev_info)
731 {
732         struct pmd_internals *internals = dev->data->dev_private;
733
734         dev_info->if_index = internals->if_index;
735         dev_info->max_mac_addrs = 1;
736         dev_info->max_rx_pktlen = (uint32_t) -1;
737         dev_info->max_rx_queues = dev->data->nb_rx_queues;
738         dev_info->max_tx_queues = dev->data->nb_tx_queues;
739         dev_info->min_rx_bufsize = 0;
740
741         return 0;
742 }
743
744 static int
745 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
746 {
747         unsigned int i;
748         unsigned long rx_packets_total = 0, rx_bytes_total = 0;
749         unsigned long rx_missed_total = 0;
750         unsigned long rx_nombuf_total = 0, rx_err_total = 0;
751         unsigned long tx_packets_total = 0, tx_bytes_total = 0;
752         unsigned long tx_packets_err_total = 0;
753         const struct pmd_internals *internal = dev->data->dev_private;
754
755         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
756                         i < dev->data->nb_rx_queues; i++) {
757                 stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
758                 stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
759                 rx_nombuf_total += internal->rx_queue[i].rx_stat.rx_nombuf;
760                 rx_err_total += internal->rx_queue[i].rx_stat.err_pkts;
761                 rx_packets_total += stats->q_ipackets[i];
762                 rx_bytes_total += stats->q_ibytes[i];
763                 rx_missed_total += queue_missed_stat_get(dev, i);
764         }
765
766         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
767                         i < dev->data->nb_tx_queues; i++) {
768                 stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
769                 stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
770                 tx_packets_total += stats->q_opackets[i];
771                 tx_bytes_total += stats->q_obytes[i];
772                 tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
773         }
774
775         stats->ipackets = rx_packets_total;
776         stats->ibytes = rx_bytes_total;
777         stats->imissed = rx_missed_total;
778         stats->ierrors = rx_err_total;
779         stats->rx_nombuf = rx_nombuf_total;
780         stats->opackets = tx_packets_total;
781         stats->obytes = tx_bytes_total;
782         stats->oerrors = tx_packets_err_total;
783
784         return 0;
785 }
786
787 static int
788 eth_stats_reset(struct rte_eth_dev *dev)
789 {
790         unsigned int i;
791         struct pmd_internals *internal = dev->data->dev_private;
792
793         for (i = 0; i < dev->data->nb_rx_queues; i++) {
794                 internal->rx_queue[i].rx_stat.pkts = 0;
795                 internal->rx_queue[i].rx_stat.bytes = 0;
796                 internal->rx_queue[i].rx_stat.err_pkts = 0;
797                 internal->rx_queue[i].rx_stat.rx_nombuf = 0;
798                 queue_missed_stat_reset(dev, i);
799         }
800
801         for (i = 0; i < dev->data->nb_tx_queues; i++) {
802                 internal->tx_queue[i].tx_stat.pkts = 0;
803                 internal->tx_queue[i].tx_stat.bytes = 0;
804                 internal->tx_queue[i].tx_stat.err_pkts = 0;
805         }
806
807         return 0;
808 }
809
810 static inline void
811 infinite_rx_ring_free(struct rte_ring *pkts)
812 {
813         struct rte_mbuf *bufs;
814
815         while (!rte_ring_dequeue(pkts, (void **)&bufs))
816                 rte_pktmbuf_free(bufs);
817
818         rte_ring_free(pkts);
819 }
820
821 static int
822 eth_dev_close(struct rte_eth_dev *dev)
823 {
824         unsigned int i;
825         struct pmd_internals *internals = dev->data->dev_private;
826
827         PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
828                         rte_socket_id());
829
830         eth_dev_stop(dev);
831
832         rte_free(dev->process_private);
833
834         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
835                 return 0;
836
837         /* Device wide flag, but cleanup must be performed per queue. */
838         if (internals->infinite_rx) {
839                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
840                         struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];
841
842                         /*
843                          * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' is
844                          * called before 'eth_rx_queue_setup()' has been called.
845                          */
846                         if (pcap_q->pkts == NULL)
847                                 continue;
848
849                         infinite_rx_ring_free(pcap_q->pkts);
850                 }
851         }
852
853         if (internals->phy_mac == 0)
854                 /* not dynamically allocated, must not be freed */
855                 dev->data->mac_addrs = NULL;
856
857         return 0;
858 }
859
860 static void
861 eth_queue_release(void *q __rte_unused)
862 {
863 }
864
865 static int
866 eth_link_update(struct rte_eth_dev *dev __rte_unused,
867                 int wait_to_complete __rte_unused)
868 {
869         return 0;
870 }
871
872 static int
873 eth_rx_queue_setup(struct rte_eth_dev *dev,
874                 uint16_t rx_queue_id,
875                 uint16_t nb_rx_desc __rte_unused,
876                 unsigned int socket_id __rte_unused,
877                 const struct rte_eth_rxconf *rx_conf __rte_unused,
878                 struct rte_mempool *mb_pool)
879 {
880         struct pmd_internals *internals = dev->data->dev_private;
881         struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
882
883         pcap_q->mb_pool = mb_pool;
884         pcap_q->port_id = dev->data->port_id;
885         pcap_q->queue_id = rx_queue_id;
886         dev->data->rx_queues[rx_queue_id] = pcap_q;
887
888         if (internals->infinite_rx) {
889                 struct pmd_process_private *pp;
890                 char ring_name[RTE_RING_NAMESIZE];
891                 static uint32_t ring_number;
892                 uint64_t pcap_pkt_count = 0;
893                 struct rte_mbuf *bufs[1];
894                 pcap_t **pcap;
895
896                 pp = rte_eth_devices[pcap_q->port_id].process_private;
897                 pcap = &pp->rx_pcap[pcap_q->queue_id];
898
899                 if (unlikely(*pcap == NULL))
900                         return -ENOENT;
901
902                 pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);
903
904                 snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
905                                 ring_number);
906
907                 pcap_q->pkts = rte_ring_create(ring_name,
908                                 rte_align64pow2(pcap_pkt_count + 1), 0,
909                                 RING_F_SP_ENQ | RING_F_SC_DEQ);
910                 ring_number++;
911                 if (!pcap_q->pkts)
912                         return -ENOENT;
913
914                 /* Fill ring with packets from PCAP file one by one. */
915                 while (eth_pcap_rx(pcap_q, bufs, 1)) {
916                         /* Check for multiseg mbufs. */
917                         if (bufs[0]->nb_segs != 1) {
918                                 infinite_rx_ring_free(pcap_q->pkts);
919                                 PMD_LOG(ERR,
920                                         "Multiseg mbufs are not supported in infinite_rx mode.");
921                                 return -EINVAL;
922                         }
923
924                         rte_ring_enqueue_bulk(pcap_q->pkts,
925                                         (void * const *)bufs, 1, NULL);
926                 }
927
928                 if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
929                         infinite_rx_ring_free(pcap_q->pkts);
930                         PMD_LOG(ERR,
931                                 "Not enough mbufs to accommodate packets in pcap file. "
932                                 "At least %" PRIu64 " mbufs per queue are required.",
933                                 pcap_pkt_count);
934                         return -EINVAL;
935                 }
936
937                 /*
938                  * Reset the stats for this queue since eth_pcap_rx calls above
939                  * didn't result in the application receiving packets.
940                  */
941                 pcap_q->rx_stat.pkts = 0;
942                 pcap_q->rx_stat.bytes = 0;
943         }
944
945         return 0;
946 }
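
/*
 * Usage sketch (illustrative devargs; the path is hypothetical): the
 * infinite_rx path above is taken for a vdev created with something
 * like:
 *   --vdev 'net_pcap0,rx_pcap=/path/to/rx.pcap,infinite_rx=1'
 * The mempool backing the queue must provide at least as many mbufs as
 * there are packets in the file, otherwise setup fails with -EINVAL as
 * checked above.
 */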
947
948 static int
949 eth_tx_queue_setup(struct rte_eth_dev *dev,
950                 uint16_t tx_queue_id,
951                 uint16_t nb_tx_desc __rte_unused,
952                 unsigned int socket_id __rte_unused,
953                 const struct rte_eth_txconf *tx_conf __rte_unused)
954 {
955         struct pmd_internals *internals = dev->data->dev_private;
956         struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
957
958         pcap_q->port_id = dev->data->port_id;
959         pcap_q->queue_id = tx_queue_id;
960         dev->data->tx_queues[tx_queue_id] = pcap_q;
961
962         return 0;
963 }
964
965 static int
966 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
967 {
968         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
969
970         return 0;
971 }
972
973 static int
974 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
975 {
976         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
977
978         return 0;
979 }
980
981 static int
982 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
983 {
984         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
985
986         return 0;
987 }
988
989 static int
990 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
991 {
992         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
993
994         return 0;
995 }
996
997 static const struct eth_dev_ops ops = {
998         .dev_start = eth_dev_start,
999         .dev_stop = eth_dev_stop,
1000         .dev_close = eth_dev_close,
1001         .dev_configure = eth_dev_configure,
1002         .dev_infos_get = eth_dev_info,
1003         .rx_queue_setup = eth_rx_queue_setup,
1004         .tx_queue_setup = eth_tx_queue_setup,
1005         .rx_queue_start = eth_rx_queue_start,
1006         .tx_queue_start = eth_tx_queue_start,
1007         .rx_queue_stop = eth_rx_queue_stop,
1008         .tx_queue_stop = eth_tx_queue_stop,
1009         .rx_queue_release = eth_queue_release,
1010         .tx_queue_release = eth_queue_release,
1011         .link_update = eth_link_update,
1012         .stats_get = eth_stats_get,
1013         .stats_reset = eth_stats_reset,
1014 };
1015
1016 static int
1017 add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
1018                 pcap_t *pcap, pcap_dumper_t *dumper)
1019 {
1020         if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
1021                 return -1;
1022         if (pcap)
1023                 pmd->queue[pmd->num_of_queue].pcap = pcap;
1024         if (dumper)
1025                 pmd->queue[pmd->num_of_queue].dumper = dumper;
1026         pmd->queue[pmd->num_of_queue].name = name;
1027         pmd->queue[pmd->num_of_queue].type = type;
1028         pmd->num_of_queue++;
1029         return 0;
1030 }
1031
1032 /*
1033  * Function handler that opens the pcap file for reading and stores a
1034  * reference to it for later use.
1035  */
1036 static int
1037 open_rx_pcap(const char *key, const char *value, void *extra_args)
1038 {
1039         const char *pcap_filename = value;
1040         struct pmd_devargs *rx = extra_args;
1041         pcap_t *pcap = NULL;
1042
1043         if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
1044                 return -1;
1045
1046         if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
1047                 pcap_close(pcap);
1048                 return -1;
1049         }
1050
1051         return 0;
1052 }
1053
1054 /*
1055  * Opens a pcap file for writing and stores a reference to it
1056  * for later use.
1057  */
1058 static int
1059 open_tx_pcap(const char *key, const char *value, void *extra_args)
1060 {
1061         const char *pcap_filename = value;
1062         struct pmd_devargs *dumpers = extra_args;
1063         pcap_dumper_t *dumper;
1064
1065         if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
1066                 return -1;
1067
1068         if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
1069                 pcap_dump_close(dumper);
1070                 return -1;
1071         }
1072
1073         return 0;
1074 }
1075
1076 /*
1077  * Opens an interface for reading and writing
1078  */
1079 static inline int
1080 open_rx_tx_iface(const char *key, const char *value, void *extra_args)
1081 {
1082         const char *iface = value;
1083         struct pmd_devargs *tx = extra_args;
1084         pcap_t *pcap = NULL;
1085
1086         if (open_single_iface(iface, &pcap) < 0)
1087                 return -1;
1088
1089         tx->queue[0].pcap = pcap;
1090         tx->queue[0].name = iface;
1091         tx->queue[0].type = key;
1092
1093         return 0;
1094 }
1095
1096 static inline int
1097 set_iface_direction(const char *iface, pcap_t *pcap,
1098                 pcap_direction_t direction)
1099 {
1100         const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
1101         if (pcap_setdirection(pcap, direction) < 0) {
1102                 PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
1103                                 iface, direction_str, pcap_geterr(pcap));
1104                 return -1;
1105         }
1106         PMD_LOG(INFO, "Setting %s pcap direction %s\n",
1107                         iface, direction_str);
1108         return 0;
1109 }
1110
1111 static inline int
1112 open_iface(const char *key, const char *value, void *extra_args)
1113 {
1114         const char *iface = value;
1115         struct pmd_devargs *pmd = extra_args;
1116         pcap_t *pcap = NULL;
1117
1118         if (open_single_iface(iface, &pcap) < 0)
1119                 return -1;
1120         if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
1121                 pcap_close(pcap);
1122                 return -1;
1123         }
1124
1125         return 0;
1126 }
1127
1128 /*
1129  * Opens a NIC for reading packets from it
1130  */
1131 static inline int
1132 open_rx_iface(const char *key, const char *value, void *extra_args)
1133 {
1134         int ret = open_iface(key, value, extra_args);
1135         if (ret < 0)
1136                 return ret;
1137         if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
1138                 struct pmd_devargs *pmd = extra_args;
1139                 unsigned int qid = pmd->num_of_queue - 1;
1140
1141                 set_iface_direction(pmd->queue[qid].name,
1142                                 pmd->queue[qid].pcap,
1143                                 PCAP_D_IN);
1144         }
1145
1146         return 0;
1147 }
1148
1149 static inline int
1150 rx_iface_args_process(const char *key, const char *value, void *extra_args)
1151 {
1152         if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
1153                         strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
1154                 return open_rx_iface(key, value, extra_args);
1155
1156         return 0;
1157 }
1158
1159 /*
1160  * Opens a NIC for writing packets to it
1161  */
1162 static int
1163 open_tx_iface(const char *key, const char *value, void *extra_args)
1164 {
1165         return open_iface(key, value, extra_args);
1166 }
1167
1168 static int
1169 select_phy_mac(const char *key __rte_unused, const char *value,
1170                 void *extra_args)
1171 {
1172         if (extra_args) {
1173                 const int phy_mac = atoi(value);
1174                 int *enable_phy_mac = extra_args;
1175
1176                 if (phy_mac)
1177                         *enable_phy_mac = 1;
1178         }
1179         return 0;
1180 }
1181
1182 static int
1183 get_infinite_rx_arg(const char *key __rte_unused,
1184                 const char *value, void *extra_args)
1185 {
1186         if (extra_args) {
1187                 const int infinite_rx = atoi(value);
1188                 int *enable_infinite_rx = extra_args;
1189
1190                 if (infinite_rx > 0)
1191                         *enable_infinite_rx = 1;
1192         }
1193         return 0;
1194 }
1195
1196 static int
1197 pmd_init_internals(struct rte_vdev_device *vdev,
1198                 const unsigned int nb_rx_queues,
1199                 const unsigned int nb_tx_queues,
1200                 struct pmd_internals **internals,
1201                 struct rte_eth_dev **eth_dev)
1202 {
1203         struct rte_eth_dev_data *data;
1204         struct pmd_process_private *pp;
1205         unsigned int numa_node = vdev->device.numa_node;
1206
1207         PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
1208                 numa_node);
1209
1210         pp = (struct pmd_process_private *)
1211                 rte_zmalloc(NULL, sizeof(struct pmd_process_private),
1212                                 RTE_CACHE_LINE_SIZE);
1213
1214         if (pp == NULL) {
1215                 PMD_LOG(ERR,
1216                         "Failed to allocate memory for process private");
1217                 return -1;
1218         }
1219
1220         /* reserve an ethdev entry */
1221         *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
1222         if (!(*eth_dev)) {
1223                 rte_free(pp);
1224                 return -1;
1225         }
1226         (*eth_dev)->process_private = pp;
1227         /* now put it all together
1228          * - store queue data in internals,
1229          * - store numa_node info in eth_dev
1230          * - point eth_dev_data to internals
1231          * - and point eth_dev structure to new eth_dev_data structure
1232          */
1233         *internals = (*eth_dev)->data->dev_private;
1234         /*
1235          * Interface MAC = 02:70:63:61:70:<iface_idx>
1236          * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
1237          * where the middle 4 characters are converted to hex.
1238          */
1239         (*internals)->eth_addr = (struct rte_ether_addr) {
1240                 .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
1241         };
1242         (*internals)->phy_mac = 0;
1243         data = (*eth_dev)->data;
1244         data->nb_rx_queues = (uint16_t)nb_rx_queues;
1245         data->nb_tx_queues = (uint16_t)nb_tx_queues;
1246         data->dev_link = pmd_link;
1247         data->mac_addrs = &(*internals)->eth_addr;
1248         data->promiscuous = 1;
1249         data->all_multicast = 1;
1250         data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1251
1252         /*
1253          * NOTE: we'll replace the data element of the originally allocated
1254          * eth_dev so the rings are local per-process.
1255          */
1256         (*eth_dev)->dev_ops = &ops;
1257
1258         strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
1259                         ETH_PCAP_ARG_MAXLEN);
1260
1261         return 0;
1262 }
1263
1264 static int
1265 eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
1266                 const unsigned int numa_node)
1267 {
1268         void *mac_addrs;
1269         struct rte_ether_addr mac;
1270
1271         if (osdep_iface_mac_get(if_name, &mac) < 0)
1272                 return -1;
1273
1274         mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
1275         if (mac_addrs == NULL)
1276                 return -1;
1277
1278         PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
1279         rte_memcpy(mac_addrs, mac.addr_bytes, RTE_ETHER_ADDR_LEN);
1280         eth_dev->data->mac_addrs = mac_addrs;
1281         return 0;
1282 }
1283
1284 static int
1285 eth_from_pcaps_common(struct rte_vdev_device *vdev,
1286                 struct pmd_devargs_all *devargs_all,
1287                 struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
1288 {
1289         struct pmd_process_private *pp;
1290         struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
1291         struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
1292         const unsigned int nb_rx_queues = rx_queues->num_of_queue;
1293         const unsigned int nb_tx_queues = tx_queues->num_of_queue;
1294         unsigned int i;
1295
1296         if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
1297                         eth_dev) < 0)
1298                 return -1;
1299
1300         pp = (*eth_dev)->process_private;
1301         for (i = 0; i < nb_rx_queues; i++) {
1302                 struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
1303                 struct devargs_queue *queue = &rx_queues->queue[i];
1304
1305                 pp->rx_pcap[i] = queue->pcap;
1306                 strlcpy(rx->name, queue->name, sizeof(rx->name));
1307                 strlcpy(rx->type, queue->type, sizeof(rx->type));
1308         }
1309
1310         for (i = 0; i < nb_tx_queues; i++) {
1311                 struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
1312                 struct devargs_queue *queue = &tx_queues->queue[i];
1313
1314                 pp->tx_dumper[i] = queue->dumper;
1315                 pp->tx_pcap[i] = queue->pcap;
1316                 strlcpy(tx->name, queue->name, sizeof(tx->name));
1317                 strlcpy(tx->type, queue->type, sizeof(tx->type));
1318         }
1319
1320         return 0;
1321 }
1322
1323 static int
1324 eth_from_pcaps(struct rte_vdev_device *vdev,
1325                 struct pmd_devargs_all *devargs_all)
1326 {
1327         struct pmd_internals *internals = NULL;
1328         struct rte_eth_dev *eth_dev = NULL;
1329         struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
1330         int single_iface = devargs_all->single_iface;
1331         unsigned int infinite_rx = devargs_all->infinite_rx;
1332         int ret;
1333
1334         ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev);
1335
1336         if (ret < 0)
1337                 return ret;
1338
1339         /* store whether we are using a single interface for rx/tx or not */
1340         internals->single_iface = single_iface;
1341
1342         if (single_iface) {
1343                 internals->if_index =
1344                         osdep_iface_index_get(rx_queues->queue[0].name);
1345
1346                 /* phy_mac arg is applied only if "iface" devarg is provided */
1347                 if (rx_queues->phy_mac) {
1348                         if (eth_pcap_update_mac(rx_queues->queue[0].name,
1349                                         eth_dev, vdev->device.numa_node) == 0)
1350                                 internals->phy_mac = 1;
1351                 }
1352         }
1353
1354         internals->infinite_rx = infinite_rx;
1355         /* Assign rx ops. */
1356         if (infinite_rx)
1357                 eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
1358         else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
1359                         single_iface)
1360                 eth_dev->rx_pkt_burst = eth_pcap_rx;
1361         else
1362                 eth_dev->rx_pkt_burst = eth_null_rx;
1363
1364         /* Assign tx ops. */
1365         if (devargs_all->is_tx_pcap)
1366                 eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
1367         else if (devargs_all->is_tx_iface || single_iface)
1368                 eth_dev->tx_pkt_burst = eth_pcap_tx;
1369         else
1370                 eth_dev->tx_pkt_burst = eth_tx_drop;
1371
1372         rte_eth_dev_probing_finish(eth_dev);
1373         return 0;
1374 }
1375
1376 static void
1377 eth_release_pcaps(struct pmd_devargs *pcaps,
1378                 struct pmd_devargs *dumpers,
1379                 int single_iface)
1380 {
1381         unsigned int i;
1382
1383         if (single_iface) {
1384                 if (pcaps->queue[0].pcap)
1385                         pcap_close(pcaps->queue[0].pcap);
1386                 return;
1387         }
1388
1389         for (i = 0; i < dumpers->num_of_queue; i++) {
1390                 if (dumpers->queue[i].dumper)
1391                         pcap_dump_close(dumpers->queue[i].dumper);
1392
1393                 if (dumpers->queue[i].pcap)
1394                         pcap_close(dumpers->queue[i].pcap);
1395         }
1396
1397         for (i = 0; i < pcaps->num_of_queue; i++) {
1398                 if (pcaps->queue[i].pcap)
1399                         pcap_close(pcaps->queue[i].pcap);
1400         }
1401 }
1402
1403 static int
1404 pmd_pcap_probe(struct rte_vdev_device *dev)
1405 {
1406         const char *name;
1407         struct rte_kvargs *kvlist;
1408         struct pmd_devargs pcaps = {0};
1409         struct pmd_devargs dumpers = {0};
1410         struct rte_eth_dev *eth_dev = NULL;
1411         struct pmd_internals *internal;
1412         int ret = 0;
1413
1414         struct pmd_devargs_all devargs_all = {
1415                 .single_iface = 0,
1416                 .is_tx_pcap = 0,
1417                 .is_tx_iface = 0,
1418                 .infinite_rx = 0,
1419         };
1420
1421         name = rte_vdev_device_name(dev);
1422         PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
1423
1424         timespec_get(&start_time, TIME_UTC);
1425         start_cycles = rte_get_timer_cycles();
1426         hz = rte_get_timer_hz();
1427
1428         ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
1429                         &timestamp_rx_dynflag);
1430         if (ret != 0) {
1431                 PMD_LOG(ERR, "Failed to register Rx timestamp field/flag");
1432                 return -1;
1433         }
1434
1435         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1436                 eth_dev = rte_eth_dev_attach_secondary(name);
1437                 if (!eth_dev) {
1438                         PMD_LOG(ERR, "Failed to probe %s", name);
1439                         return -1;
1440                 }
1441
1442                 internal = eth_dev->data->dev_private;
1443
1444                 kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
1445                 if (kvlist == NULL)
1446                         return -1;
1447         } else {
1448                 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
1449                                 valid_arguments);
1450                 if (kvlist == NULL)
1451                         return -1;
1452         }
1453
1454         /*
1455          * If iface argument is passed we open the NICs and use them for
1456          * reading / writing
1457          */
1458         if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
1459
1460                 ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
1461                                 &open_rx_tx_iface, &pcaps);
1462                 if (ret < 0)
1463                         goto free_kvlist;
1464
1465                 dumpers.queue[0] = pcaps.queue[0];
1466
1467                 ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
1468                                 &select_phy_mac, &pcaps.phy_mac);
1469                 if (ret < 0)
1470                         goto free_kvlist;
1471
1472                 dumpers.phy_mac = pcaps.phy_mac;
1473
1474                 devargs_all.single_iface = 1;
1475                 pcaps.num_of_queue = 1;
1476                 dumpers.num_of_queue = 1;
1477
1478                 goto create_eth;
1479         }
1480
1481         /*
1482          * We check whether we want to open an RX stream from a real NIC, a
1483          * pcap file or open a dummy RX stream
1484          */
1485         devargs_all.is_rx_pcap =
1486                 rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
1487         devargs_all.is_rx_iface =
1488                 (rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) +
1489                  rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_IN_ARG)) ? 1 : 0;
1490         pcaps.num_of_queue = 0;
1491
1492         devargs_all.is_tx_pcap =
1493                 rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
1494         devargs_all.is_tx_iface =
1495                 rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
1496         dumpers.num_of_queue = 0;
1497
1498         if (devargs_all.is_rx_pcap) {
1499                 /*
1500                  * We check whether we want to infinitely rx the pcap file.
1501                  */
1502                 unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
1503                                 ETH_PCAP_INFINITE_RX_ARG);
1504
1505                 if (infinite_rx_arg_cnt == 1) {
1506                         ret = rte_kvargs_process(kvlist,
1507                                         ETH_PCAP_INFINITE_RX_ARG,
1508                                         &get_infinite_rx_arg,
1509                                         &devargs_all.infinite_rx);
1510                         if (ret < 0)
1511                                 goto free_kvlist;
1512                         PMD_LOG(INFO, "infinite_rx has been %s for %s",
1513                                         devargs_all.infinite_rx ? "enabled" : "disabled",
1514                                         name);
1515
1516                 } else if (infinite_rx_arg_cnt > 1) {
1517                         PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
1518                                         "argument has been provided more than once "
1519                                         "for %s", name);
1520                 }
1521
1522                 ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
1523                                 &open_rx_pcap, &pcaps);
1524         } else if (devargs_all.is_rx_iface) {
1525                 ret = rte_kvargs_process(kvlist, NULL,
1526                                 &rx_iface_args_process, &pcaps);
1527         } else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
1528                 unsigned int i;
1529
1530                 /* Count number of tx queue args passed before dummy rx queue
1531                  * creation so a dummy rx queue can be created for each tx queue
1532                  */
1533                 unsigned int num_tx_queues =
1534                         (rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
1535                         rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));
1536
1537                 PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");
1538
1539                 /* Creating a dummy rx queue for each tx queue passed */
1540                 for (i = 0; i < num_tx_queues; i++)
1541                         ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,
1542                                         NULL);
1543         } else {
1544                 PMD_LOG(ERR, "Error - No rx or tx queues provided");
1545                 ret = -ENOENT;
1546         }
1547         if (ret < 0)
1548                 goto free_kvlist;
1549
1550         /*
1551          * We check whether we want to open a TX stream to a real NIC,
1552          * a pcap file, or drop packets on tx
1553          */
1554         if (devargs_all.is_tx_pcap) {
1555                 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
1556                                 &open_tx_pcap, &dumpers);
1557         } else if (devargs_all.is_tx_iface) {
1558                 ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
1559                                 &open_tx_iface, &dumpers);
1560         } else {
1561                 unsigned int i;
1562
1563                 PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");
1564
1565                 /* Add 1 dummy queue per rxq which counts and drops packets. */
1566                 for (i = 0; i < pcaps.num_of_queue; i++)
1567                         ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,
1568                                         NULL);
1569         }
1570
1571         if (ret < 0)
1572                 goto free_kvlist;
1573
1574 create_eth:
1575         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1576                 struct pmd_process_private *pp;
1577                 unsigned int i;
1578
1579                 internal = eth_dev->data->dev_private;
1580                 pp = (struct pmd_process_private *)
1581                         rte_zmalloc(NULL,
1582                                 sizeof(struct pmd_process_private),
1583                                 RTE_CACHE_LINE_SIZE);
1584
1585                 if (pp == NULL) {
1586                         PMD_LOG(ERR,
1587                                 "Failed to allocate memory for process private");
1588                         ret = -1;
1589                         goto free_kvlist;
1590                 }
1591
1592                 eth_dev->dev_ops = &ops;
1593                 eth_dev->device = &dev->device;
1594
1595                 /* setup process private */
1596                 for (i = 0; i < pcaps.num_of_queue; i++)
1597                         pp->rx_pcap[i] = pcaps.queue[i].pcap;
1598
1599                 for (i = 0; i < dumpers.num_of_queue; i++) {
1600                         pp->tx_dumper[i] = dumpers.queue[i].dumper;
1601                         pp->tx_pcap[i] = dumpers.queue[i].pcap;
1602                 }
1603
1604                 eth_dev->process_private = pp;
1605                 eth_dev->rx_pkt_burst = eth_pcap_rx;
1606                 if (devargs_all.is_tx_pcap)
1607                         eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
1608                 else
1609                         eth_dev->tx_pkt_burst = eth_pcap_tx;
1610
1611                 rte_eth_dev_probing_finish(eth_dev);
1612                 goto free_kvlist;
1613         }
1614
1615         devargs_all.rx_queues = pcaps;
1616         devargs_all.tx_queues = dumpers;
1617
1618         ret = eth_from_pcaps(dev, &devargs_all);
1619
1620 free_kvlist:
1621         rte_kvargs_free(kvlist);
1622
1623         if (ret < 0)
1624                 eth_release_pcaps(&pcaps, &dumpers, devargs_all.single_iface);
1625
1626         return ret;
1627 }
1628
1629 static int
1630 pmd_pcap_remove(struct rte_vdev_device *dev)
1631 {
1632         struct rte_eth_dev *eth_dev = NULL;
1633
1634         if (!dev)
1635                 return -1;
1636
1637         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1638         if (eth_dev == NULL)
1639                 return 0; /* port already released */
1640
1641         eth_dev_close(eth_dev);
1642         rte_eth_dev_release_port(eth_dev);
1643
1644         return 0;
1645 }
1646
1647 static struct rte_vdev_driver pmd_pcap_drv = {
1648         .probe = pmd_pcap_probe,
1649         .remove = pmd_pcap_remove,
1650 };
1651
1652 RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
1653 RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
1654 RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
1655         ETH_PCAP_RX_PCAP_ARG "=<string> "
1656         ETH_PCAP_TX_PCAP_ARG "=<string> "
1657         ETH_PCAP_RX_IFACE_ARG "=<ifc> "
1658         ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
1659         ETH_PCAP_TX_IFACE_ARG "=<ifc> "
1660         ETH_PCAP_IFACE_ARG "=<ifc> "
1661         ETH_PCAP_PHY_MAC_ARG "=<int>"
1662         ETH_PCAP_INFINITE_RX_ARG "=<0|1>");
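
/*
 * Example invocations matching the parameters registered above (file
 * and interface names are illustrative):
 *   --vdev 'net_pcap0,rx_pcap=rx.pcap,tx_pcap=tx.pcap'
 *       read packets from rx.pcap, dump transmitted packets to tx.pcap
 *   --vdev 'net_pcap0,iface=eth0'
 *       use one real interface, shared between rx and tx
 *   --vdev 'net_pcap0,rx_iface_in=eth0,tx_iface=eth0'
 *       like rx_iface, but capture only incoming packets (PCAP_D_IN)
 */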