#include <pcap.h>
#include <rte_cycles.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
-#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
+#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
+#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx"
#define ETH_PCAP_ARG_MAXLEN 64
#define RTE_PMD_PCAP_MAX_QUEUES 16
static char errbuf[PCAP_ERRBUF_SIZE];
-static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
static uint8_t iface_idx;
+static uint64_t timestamp_rx_dynflag;
+static int timestamp_dynfield_offset = -1;
+
struct queue_stat {
volatile unsigned long pkts;
volatile unsigned long bytes;
volatile unsigned long err_pkts;
};
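+/*
+ * pcap's per-queue drop counter (ps_drop) is only 32 bits wide and is
+ * lost when the pcap handle is closed, so accumulate it here to keep
+ * the ethdev imissed statistic stable across stop/start and rollover.
+ */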
+struct queue_missed_stat {
+ /* last value retrieved from pcap */
+ unsigned int pcap;
+ /* stores values lost by pcap stop or rollover */
+ unsigned long mnemonic;
+ /* value on last reset */
+ unsigned long reset;
+};
+
struct pcap_rx_queue {
uint16_t port_id;
uint16_t queue_id;
struct rte_mempool *mb_pool;
struct queue_stat rx_stat;
+ struct queue_missed_stat missed_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
+
+ /* Packets pre-loaded from the pcap file, looped over for infinite Rx */
+ struct rte_ring *pkts;
};
struct pcap_tx_queue {
struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
char devargs[ETH_PCAP_ARG_MAXLEN];
- struct ether_addr eth_addr;
+ struct rte_ether_addr eth_addr;
int if_index;
int single_iface;
int phy_mac;
+ unsigned int infinite_rx;
};
struct pmd_process_private {
int phy_mac;
};
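+/* All devargs parsed at probe time, bundled so they can be passed around as one unit. */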
+struct pmd_devargs_all {
+ struct pmd_devargs rx_queues;
+ struct pmd_devargs tx_queues;
+ int single_iface;
+ unsigned int is_tx_pcap;
+ unsigned int is_tx_iface;
+ unsigned int is_rx_pcap;
+ unsigned int is_rx_iface;
+ unsigned int infinite_rx;
+};
+
static const char *valid_arguments[] = {
ETH_PCAP_RX_PCAP_ARG,
ETH_PCAP_TX_PCAP_ARG,
ETH_PCAP_TX_IFACE_ARG,
ETH_PCAP_IFACE_ARG,
ETH_PCAP_PHY_MAC_ARG,
+ ETH_PCAP_INFINITE_RX_ARG,
NULL
};
.link_autoneg = ETH_LINK_FIXED,
};
-static int eth_pcap_logtype;
+RTE_LOG_REGISTER(eth_pcap_logtype, pmd.net.pcap, NOTICE);
#define PMD_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
"%s(): " fmt "\n", __func__, ##args)
+static struct queue_missed_stat*
+queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct queue_missed_stat *missed_stat =
+ &internals->rx_queue[qid].missed_stat;
+ const struct pmd_process_private *pp = dev->process_private;
+ pcap_t *pcap = pp->rx_pcap[qid];
+ struct pcap_stat stat;
+
+ if (!pcap || (pcap_stats(pcap, &stat) != 0))
+ return missed_stat;
+
+ /* Rollover check: best-effort fixup assuming a single rollover. */
+ if (stat.ps_drop < missed_stat->pcap)
+ missed_stat->mnemonic += UINT_MAX;
+ missed_stat->pcap = stat.ps_drop;
+
+ return missed_stat;
+}
+
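+/* Fold the final drop count into the accumulator before the pcap handle is closed. */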
+static void
+queue_missed_stat_on_stop_update(struct rte_eth_dev *dev, unsigned int qid)
+{
+ struct queue_missed_stat *missed_stat =
+ queue_missed_stat_update(dev, qid);
+
+ missed_stat->mnemonic += missed_stat->pcap;
+ missed_stat->pcap = 0;
+}
+
+static void
+queue_missed_stat_reset(struct rte_eth_dev *dev, unsigned int qid)
+{
+ struct queue_missed_stat *missed_stat =
+ queue_missed_stat_update(dev, qid);
+
+ missed_stat->reset = missed_stat->pcap;
+ missed_stat->mnemonic = 0;
+}
+
+static unsigned long
+queue_missed_stat_get(struct rte_eth_dev *dev, unsigned int qid)
+{
+ const struct queue_missed_stat *missed_stat =
+ queue_missed_stat_update(dev, qid);
+
+ return missed_stat->pcap + missed_stat->mnemonic - missed_stat->reset;
+}
+
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
const u_char *data, uint16_t data_len)
return mbuf->nb_segs;
}
-/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
-static void
-eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
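+/*
+ * Infinite Rx: copy pre-loaded packets from the queue's ring into fresh
+ * mbufs; each source packet is re-enqueued so the capture loops forever.
+ */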
+static uint16_t
+eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- uint16_t data_len = 0;
+ int i;
+ struct pcap_rx_queue *pcap_q = queue;
+ uint32_t rx_bytes = 0;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
- while (mbuf) {
- rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
- mbuf->data_len);
+ if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
+ return 0;
- data_len += mbuf->data_len;
- mbuf = mbuf->next;
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *pcap_buf;
+ int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
+ if (err) {
+ /* Free the unused remainder of the bulk allocation to avoid leaking mbufs. */
+ rte_pktmbuf_free_bulk(&bufs[i], nb_pkts - i);
+ break;
+ }
+
+ rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
+ rte_pktmbuf_mtod(pcap_buf, void *),
+ pcap_buf->data_len);
+ bufs[i]->data_len = pcap_buf->data_len;
+ bufs[i]->pkt_len = pcap_buf->pkt_len;
+ bufs[i]->port = pcap_q->port_id;
+ rx_bytes += pcap_buf->data_len;
+
+ /* Enqueue packet back on ring to allow infinite rx. */
+ rte_ring_enqueue(pcap_q->pkts, pcap_buf);
}
+
+ pcap_q->rx_stat.pkts += i;
+ pcap_q->rx_stat.bytes += rx_bytes;
+
+ return i;
}
static uint16_t
struct rte_mbuf *mbuf;
struct pcap_rx_queue *pcap_q = queue;
uint16_t num_rx = 0;
- uint16_t buf_size;
uint32_t rx_bytes = 0;
pcap_t *pcap;
if (unlikely(mbuf == NULL))
break;
- /* Now get the space available for data in the mbuf */
- buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
- RTE_PKTMBUF_HEADROOM;
-
- if (header.caplen <= buf_size) {
+ if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
/* pcap packet will fit in the mbuf, can copy it */
rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
header.caplen);
}
mbuf->pkt_len = (uint16_t)header.caplen;
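+ /* Record the capture timestamp in the dynamic mbuf timestamp field. */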
+ *RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) =
+ (uint64_t)header.ts.tv_sec * 1000000 +
+ header.ts.tv_usec;
+ mbuf->ol_flags |= timestamp_rx_dynflag;
mbuf->port = pcap_q->port_id;
bufs[num_rx] = mbuf;
num_rx++;
return num_rx;
}
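+/* Rx burst op used when the device has no Rx source configured: receives nothing. */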
+static uint16_t
+eth_null_rx(void *queue __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+#define NSEC_PER_SEC 1000000000L
+
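+/*
+ * Dumpers are opened with PCAP_TSTAMP_PRECISION_NANO, so ts->tv_usec
+ * carries nanoseconds here rather than microseconds.
+ */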
static inline void
calculate_timestamp(struct timeval *ts) {
uint64_t cycles;
cycles = rte_get_timer_cycles() - start_cycles;
cur_time.tv_sec = cycles / hz;
- cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
- timeradd(&start_time, &cur_time, ts);
+ cur_time.tv_usec = (cycles % hz) * NSEC_PER_SEC / hz;
+
+ ts->tv_sec = start_time.tv_sec + cur_time.tv_sec;
+ ts->tv_usec = start_time.tv_usec + cur_time.tv_usec;
+ if (ts->tv_usec >= NSEC_PER_SEC) {
+ ts->tv_usec -= NSEC_PER_SEC;
+ ts->tv_sec += 1;
+ }
}
/*
uint32_t tx_bytes = 0;
struct pcap_pkthdr header;
pcap_dumper_t *dumper;
+ unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
+ size_t len, caplen;
pp = rte_eth_devices[dumper_q->port_id].process_private;
dumper = pp->tx_dumper[dumper_q->queue_id];
* dumper */
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
- calculate_timestamp(&header.ts);
- header.len = mbuf->pkt_len;
- header.caplen = header.len;
-
- if (likely(mbuf->nb_segs == 1)) {
- pcap_dump((u_char *)dumper, &header,
- rte_pktmbuf_mtod(mbuf, void*));
- } else {
- if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
- eth_pcap_gather_data(tx_pcap_data, mbuf);
- pcap_dump((u_char *)dumper, &header,
- tx_pcap_data);
- } else {
- PMD_LOG(ERR,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
- mbuf->pkt_len,
- ETHER_MAX_JUMBO_FRAME_LEN);
-
- rte_pktmbuf_free(mbuf);
- break;
- }
+ len = caplen = rte_pktmbuf_pkt_len(mbuf);
+ if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
+ len > sizeof(temp_data))) {
+ caplen = sizeof(temp_data);
}
+ calculate_timestamp(&header.ts);
+ header.len = len;
+ header.caplen = caplen;
+ /* rte_pktmbuf_read() returns a pointer to the data directly
+ * in the mbuf (when the mbuf is contiguous) or, otherwise,
+ * a pointer to temp_data after copying into it.
+ */
+ pcap_dump((u_char *)dumper, &header,
+ rte_pktmbuf_read(mbuf, 0, caplen, temp_data));
+
num_tx++;
- tx_bytes += mbuf->pkt_len;
+ tx_bytes += caplen;
rte_pktmbuf_free(mbuf);
}
dumper_q->tx_stat.bytes += tx_bytes;
dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
- return num_tx;
+ return nb_pkts;
+}
+
+/*
+ * Callback to handle dropping packets in the infinite rx case.
+ */
+static uint16_t
+eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned int i;
+ uint32_t tx_bytes = 0;
+ struct pcap_tx_queue *tx_queue = queue;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ tx_bytes += bufs[i]->pkt_len;
+ rte_pktmbuf_free(bufs[i]);
+ }
+
+ tx_queue->tx_stat.pkts += nb_pkts;
+ tx_queue->tx_stat.bytes += tx_bytes;
+
+ return i;
}
/*
uint16_t num_tx = 0;
uint32_t tx_bytes = 0;
pcap_t *pcap;
+ unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
+ size_t len;
pp = rte_eth_devices[tx_queue->port_id].process_private;
pcap = pp->tx_pcap[tx_queue->queue_id];
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
-
- if (likely(mbuf->nb_segs == 1)) {
- ret = pcap_sendpacket(pcap,
- rte_pktmbuf_mtod(mbuf, u_char *),
- mbuf->pkt_len);
- } else {
- if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
- eth_pcap_gather_data(tx_pcap_data, mbuf);
- ret = pcap_sendpacket(pcap,
- tx_pcap_data, mbuf->pkt_len);
- } else {
- PMD_LOG(ERR,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
- mbuf->pkt_len,
- ETHER_MAX_JUMBO_FRAME_LEN);
-
- rte_pktmbuf_free(mbuf);
- break;
- }
+ len = rte_pktmbuf_pkt_len(mbuf);
+ if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
+ len > sizeof(temp_data))) {
+ PMD_LOG(ERR,
+ "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
+ len, sizeof(temp_data));
+ rte_pktmbuf_free(mbuf);
+ continue;
}
+ /* rte_pktmbuf_read() returns a pointer to the data directly
+ * in the mbuf (when the mbuf is contiguous) or, otherwise,
+ * a pointer to temp_data after copying into it.
+ */
+ ret = pcap_sendpacket(pcap,
+ rte_pktmbuf_read(mbuf, 0, len, temp_data), len);
if (unlikely(ret != 0))
break;
num_tx++;
- tx_bytes += mbuf->pkt_len;
+ tx_bytes += len;
rte_pktmbuf_free(mbuf);
}
tx_queue->tx_stat.pkts += num_tx;
tx_queue->tx_stat.bytes += tx_bytes;
- tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;
+ tx_queue->tx_stat.err_pkts += i - num_tx;
- return num_tx;
+ return i;
}
/*
* with pcap_dump_open(). We create a large enough Ethernet
* pcap holder.
*/
- tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
+ tx_pcap = pcap_open_dead_with_tstamp_precision(DLT_EN10MB,
+ RTE_ETH_PCAP_SNAPSHOT_LEN, PCAP_TSTAMP_PRECISION_NANO);
if (tx_pcap == NULL) {
PMD_LOG(ERR, "Couldn't create dead pcap");
return -1;
return 0;
}
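+/*
+ * Iterate the capture once to count its packets; the count is used to
+ * size the infinite-Rx ring. The handle is reopened afterwards.
+ */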
+static uint64_t
+count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
+{
+ const u_char *packet;
+ struct pcap_pkthdr header;
+ uint64_t pcap_pkt_count = 0;
+
+ while ((packet = pcap_next(*pcap, &header)))
+ pcap_pkt_count++;
+
+ /* The pcap is reopened so it can be used as normal later. */
+ pcap_close(*pcap);
+ *pcap = NULL;
+ open_single_rx_pcap(pcap_q->name, pcap);
+
+ return pcap_pkt_count;
+}
+
static int
eth_dev_start(struct rte_eth_dev *dev)
{
* Is the only place for us to close all the tx streams dumpers.
* If not called the dumpers will be flushed within each tx burst.
*/
-static void
+static int
eth_dev_stop(struct rte_eth_dev *dev)
{
unsigned int i;
/* Special iface case. Single pcap is open and shared between tx/rx. */
if (internals->single_iface) {
- pcap_close(pp->tx_pcap[0]);
- pp->tx_pcap[0] = NULL;
- pp->rx_pcap[0] = NULL;
+ queue_missed_stat_on_stop_update(dev, 0);
+ if (pp->tx_pcap[0] != NULL) {
+ pcap_close(pp->tx_pcap[0]);
+ pp->tx_pcap[0] = NULL;
+ pp->rx_pcap[0] = NULL;
+ }
goto status_down;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (pp->rx_pcap[i] != NULL) {
+ queue_missed_stat_on_stop_update(dev, i);
pcap_close(pp->rx_pcap[i]);
pp->rx_pcap[i] = NULL;
}
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ return 0;
}
static int
return 0;
}
-static void
+static int
eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
{
unsigned int i;
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long rx_missed_total = 0;
unsigned long tx_packets_total = 0, tx_bytes_total = 0;
unsigned long tx_packets_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
rx_packets_total += stats->q_ipackets[i];
rx_bytes_total += stats->q_ibytes[i];
+ rx_missed_total += queue_missed_stat_get(dev, i);
}
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < dev->data->nb_tx_queues; i++) {
stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
- stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
tx_packets_total += stats->q_opackets[i];
tx_bytes_total += stats->q_obytes[i];
- tx_packets_err_total += stats->q_errors[i];
+ tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts;
}
stats->ipackets = rx_packets_total;
stats->ibytes = rx_bytes_total;
+ stats->imissed = rx_missed_total;
stats->opackets = tx_packets_total;
stats->obytes = tx_bytes_total;
stats->oerrors = tx_packets_err_total;
return 0;
}
-static void
+static int
eth_stats_reset(struct rte_eth_dev *dev)
{
unsigned int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
internal->rx_queue[i].rx_stat.pkts = 0;
internal->rx_queue[i].rx_stat.bytes = 0;
+ queue_missed_stat_reset(dev, i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
internal->tx_queue[i].tx_stat.bytes = 0;
internal->tx_queue[i].tx_stat.err_pkts = 0;
}
+
+ return 0;
}
-static void
-eth_dev_close(struct rte_eth_dev *dev __rte_unused)
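+/* Drain and free every mbuf parked in an infinite-Rx ring, then free the ring. */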
+static inline void
+infinite_rx_ring_free(struct rte_ring *pkts)
+{
+ struct rte_mbuf *bufs;
+
+ while (!rte_ring_dequeue(pkts, (void **)&bufs))
+ rte_pktmbuf_free(bufs);
+
+ rte_ring_free(pkts);
+}
+
+static int
+eth_dev_close(struct rte_eth_dev *dev)
{
+ unsigned int i;
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ PMD_LOG(INFO, "Closing pcap ethdev on NUMA socket %d",
+ rte_socket_id());
+
+ eth_dev_stop(dev);
+
+ rte_free(dev->process_private);
+
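+ /* Only the primary process owns the shared data freed below. */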
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Device wide flag, but cleanup must be performed per queue. */
+ if (internals->infinite_rx) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];
+
+ /*
+ * 'pcap_q->pkts' can be NULL if 'eth_dev_close()' is called
+ * before 'eth_rx_queue_setup()'
+ */
+ if (pcap_q->pkts == NULL)
+ continue;
+
+ infinite_rx_ring_free(pcap_q->pkts);
+ }
+ }
+
+ if (internals->phy_mac == 0)
+ /* not dynamically allocated, must not be freed */
+ dev->data->mac_addrs = NULL;
+
+ return 0;
}
static void
pcap_q->queue_id = rx_queue_id;
dev->data->rx_queues[rx_queue_id] = pcap_q;
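+ /* For infinite_rx, pre-load the whole capture into a ring at setup time. */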
+ if (internals->infinite_rx) {
+ struct pmd_process_private *pp;
+ char ring_name[NAME_MAX];
+ static uint32_t ring_number;
+ uint64_t pcap_pkt_count = 0;
+ struct rte_mbuf *bufs[1];
+ pcap_t **pcap;
+
+ pp = rte_eth_devices[pcap_q->port_id].process_private;
+ pcap = &pp->rx_pcap[pcap_q->queue_id];
+
+ if (unlikely(*pcap == NULL))
+ return -ENOENT;
+
+ pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);
+
+ snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
+ ring_number);
+
+ pcap_q->pkts = rte_ring_create(ring_name,
+ rte_align64pow2(pcap_pkt_count + 1), 0,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ ring_number++;
+ if (!pcap_q->pkts)
+ return -ENOENT;
+
+ /* Fill ring with packets from PCAP file one by one. */
+ while (eth_pcap_rx(pcap_q, bufs, 1)) {
+ /* Check for multiseg mbufs. */
+ if (bufs[0]->nb_segs != 1) {
+ infinite_rx_ring_free(pcap_q->pkts);
+ PMD_LOG(ERR,
+ "Multiseg mbufs are not supported in infinite_rx mode.");
+ return -EINVAL;
+ }
+
+ rte_ring_enqueue_bulk(pcap_q->pkts,
+ (void * const *)bufs, 1, NULL);
+ }
+
+ if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
+ infinite_rx_ring_free(pcap_q->pkts);
+ PMD_LOG(ERR,
+ "Not enough mbufs to accommodate packets in pcap file. "
+ "At least %" PRIu64 " mbufs per queue is required.",
+ pcap_pkt_count);
+ return -EINVAL;
+ }
+
+ /*
+ * Reset the stats for this queue since eth_pcap_rx calls above
+ * didn't result in the application receiving packets.
+ */
+ pcap_q->rx_stat.pkts = 0;
+ pcap_q->rx_stat.bytes = 0;
+ }
+
return 0;
}
return 0;
}
-static struct rte_vdev_driver pmd_pcap_drv;
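+/* kvargs handler: any 'infinite_rx' value greater than zero enables the feature. */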
+static int
+get_infinite_rx_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (extra_args) {
+ const int infinite_rx = atoi(value);
+ int *enable_infinite_rx = extra_args;
+
+ if (infinite_rx > 0)
+ *enable_infinite_rx = 1;
+ }
+ return 0;
+}
static int
pmd_init_internals(struct rte_vdev_device *vdev,
* derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
* where the middle 4 characters are converted to hex.
*/
- (*internals)->eth_addr = (struct ether_addr) {
+ (*internals)->eth_addr = (struct rte_ether_addr) {
.addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
};
(*internals)->phy_mac = 0;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
+ data->promiscuous = 1;
+ data->all_multicast = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/*
* NOTE: we'll replace the data element, of originally allocated
return -1;
}
- mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
+ mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
if (!mac_addrs) {
close(if_fd);
return -1;
PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
eth_dev->data->mac_addrs = mac_addrs;
rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
- ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
close(if_fd);
ifm = (struct if_msghdr *)buf;
sdl = (struct sockaddr_dl *)(ifm + 1);
- mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
+ mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node);
if (!mac_addrs) {
rte_free(buf);
return -1;
PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
eth_dev->data->mac_addrs = mac_addrs;
rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
- LLADDR(sdl), ETHER_ADDR_LEN);
+ LLADDR(sdl), RTE_ETHER_ADDR_LEN);
rte_free(buf);
static int
eth_from_pcaps_common(struct rte_vdev_device *vdev,
- struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
- struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct pmd_devargs_all *devargs_all,
struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
struct pmd_process_private *pp;
+ struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
+ struct pmd_devargs *tx_queues = &devargs_all->tx_queues;
+ const unsigned int nb_rx_queues = rx_queues->num_of_queue;
+ const unsigned int nb_tx_queues = tx_queues->num_of_queue;
unsigned int i;
- /* do some parameter checking */
- if (rx_queues == NULL && nb_rx_queues > 0)
- return -1;
- if (tx_queues == NULL && nb_tx_queues > 0)
- return -1;
-
if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
eth_dev) < 0)
return -1;
static int
eth_from_pcaps(struct rte_vdev_device *vdev,
- struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
- struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
- int single_iface, unsigned int using_dumpers)
+ struct pmd_devargs_all *devargs_all)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_devargs *rx_queues = &devargs_all->rx_queues;
+ int single_iface = devargs_all->single_iface;
+ unsigned int infinite_rx = devargs_all->infinite_rx;
int ret;
- ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, &internals, ð_dev);
+ ret = eth_from_pcaps_common(vdev, devargs_all, &internals, ð_dev);
if (ret < 0)
return ret;
/* phy_mac arg is applied only if "iface" devarg is provided */
if (rx_queues->phy_mac) {
- int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
- eth_dev, vdev->device.numa_node);
- if (ret == 0)
+ if (eth_pcap_update_mac(rx_queues->queue[0].name,
+ eth_dev, vdev->device.numa_node) == 0)
internals->phy_mac = 1;
}
}
- eth_dev->rx_pkt_burst = eth_pcap_rx;
+ internals->infinite_rx = infinite_rx;
+ /* Assign rx ops. */
+ if (infinite_rx)
+ eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
+ else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface ||
+ single_iface)
+ eth_dev->rx_pkt_burst = eth_pcap_rx;
+ else
+ eth_dev->rx_pkt_burst = eth_null_rx;
- if (using_dumpers)
+ /* Assign tx ops. */
+ if (devargs_all->is_tx_pcap)
eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
- else
+ else if (devargs_all->is_tx_iface || single_iface)
eth_dev->tx_pkt_burst = eth_pcap_tx;
+ else
+ eth_dev->tx_pkt_burst = eth_tx_drop;
rte_eth_dev_probing_finish(eth_dev);
return 0;
pmd_pcap_probe(struct rte_vdev_device *dev)
{
const char *name;
- unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
struct pmd_devargs dumpers = {0};
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internal;
- int single_iface = 0;
- int ret;
+ int ret = 0;
+
+ struct pmd_devargs_all devargs_all = {
+ .single_iface = 0,
+ .is_tx_pcap = 0,
+ .is_tx_iface = 0,
+ .infinite_rx = 0,
+ };
name = rte_vdev_device_name(dev);
PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
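+ /* Register the dynamic mbuf field and flag used to report Rx timestamps. */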
+ ret = rte_mbuf_dyn_rx_timestamp_register(×tamp_dynfield_offset,
+ ×tamp_rx_dynflag);
+ if (ret != 0) {
+ PMD_LOG(ERR, "Failed to register Rx timestamp field/flag");
+ return -1;
+ }
+
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
dumpers.phy_mac = pcaps.phy_mac;
- single_iface = 1;
+ devargs_all.single_iface = 1;
pcaps.num_of_queue = 1;
dumpers.num_of_queue = 1;
}
/*
- * We check whether we want to open a RX stream from a real NIC or a
- * pcap file
+ * Check whether to open a RX stream from a real NIC, from a pcap
+ * file, or to create a dummy (null) RX stream.
*/
- is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ devargs_all.is_rx_pcap =
+ rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ devargs_all.is_rx_iface =
+ (rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) +
+ rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_IN_ARG)) ? 1 : 0;
pcaps.num_of_queue = 0;
- if (is_rx_pcap) {
+ devargs_all.is_tx_pcap =
+ rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ devargs_all.is_tx_iface =
+ rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
+
+ if (devargs_all.is_rx_pcap) {
+ /*
+ * Check whether the pcap file should be received in an infinite loop.
+ */
+ unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist,
+ ETH_PCAP_INFINITE_RX_ARG);
+
+ if (infinite_rx_arg_cnt == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_PCAP_INFINITE_RX_ARG,
+ &get_infinite_rx_arg,
+ &devargs_all.infinite_rx);
+ if (ret < 0)
+ goto free_kvlist;
+ PMD_LOG(INFO, "infinite_rx has been %s for %s",
+ devargs_all.infinite_rx ? "enabled" : "disabled",
+ name);
+
+ } else if (infinite_rx_arg_cnt > 1) {
+ PMD_LOG(WARNING, "infinite_rx has not been enabled since the "
+ "argument has been provided more than once "
+ "for %s", name);
+ }
+
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
&open_rx_pcap, &pcaps);
- } else {
+ } else if (devargs_all.is_rx_iface) {
ret = rte_kvargs_process(kvlist, NULL,
&rx_iface_args_process, &pcaps);
- }
+ } else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) {
+ unsigned int i;
+
+ /* Count the tx queue args before creating dummy rx queues, so
+ * one dummy rx queue can be created for each tx queue
+ */
+ unsigned int num_tx_queues =
+ (rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) +
+ rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG));
+ PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided.");
+
+ /* Creating a dummy rx queue for each tx queue passed */
+ for (i = 0; i < num_tx_queues; i++)
+ ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL,
+ NULL);
+ } else {
+ PMD_LOG(ERR, "Error - No rx or tx queues provided");
+ ret = -ENOENT;
+ }
if (ret < 0)
goto free_kvlist;
/*
- * We check whether we want to open a TX stream to a real NIC or a
- * pcap file
+ * Check whether to open a TX stream to a real NIC or a pcap file,
+ * or to drop all packets on tx.
*/
- is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
- dumpers.num_of_queue = 0;
-
- if (is_tx_pcap)
+ if (devargs_all.is_tx_pcap) {
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
&open_tx_pcap, &dumpers);
- else
+ } else if (devargs_all.is_tx_iface) {
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
&open_tx_iface, &dumpers);
+ } else {
+ unsigned int i;
+
+ PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided.");
+
+ /* Add 1 dummy queue per rxq which counts and drops packets. */
+ for (i = 0; i < pcaps.num_of_queue; i++)
+ ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL,
+ NULL);
+ }
if (ret < 0)
goto free_kvlist;
eth_dev->process_private = pp;
eth_dev->rx_pkt_burst = eth_pcap_rx;
- if (is_tx_pcap)
+ if (devargs_all.is_tx_pcap)
eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
else
eth_dev->tx_pkt_burst = eth_pcap_tx;
goto free_kvlist;
}
- ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
- dumpers.num_of_queue, single_iface, is_tx_pcap);
+ devargs_all.rx_queues = pcaps;
+ devargs_all.tx_queues = dumpers;
+
+ ret = eth_from_pcaps(dev, &devargs_all);
free_kvlist:
rte_kvargs_free(kvlist);
static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
- struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
- PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
- rte_socket_id());
-
if (!dev)
return -1;
- /* reserve an ethdev entry */
eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
- return -1;
+ return 0; /* port already released */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- internals = eth_dev->data->dev_private;
- if (internals != NULL && internals->phy_mac == 0)
- /* not dynamically allocated, must not be freed */
- eth_dev->data->mac_addrs = NULL;
- }
-
- rte_free(eth_dev->process_private);
+ eth_dev_close(eth_dev);
rte_eth_dev_release_port(eth_dev);
return 0;
ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
ETH_PCAP_IFACE_ARG "=<ifc> "
- ETH_PCAP_PHY_MAC_ARG "=<int>");
-
-RTE_INIT(eth_pcap_init_log)
-{
- eth_pcap_logtype = rte_log_register("pmd.net.pcap");
- if (eth_pcap_logtype >= 0)
- rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
-}
+ ETH_PCAP_PHY_MAC_ARG "=<int>"
+ ETH_PCAP_INFINITE_RX_ARG "=<0|1>");