diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 057aa9dbfc..90f5d75ea8 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -19,8 +19,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -51,6 +51,9 @@ static uint64_t start_cycles;
 static uint64_t hz;
 static uint8_t iface_idx;
 
+static uint64_t timestamp_rx_dynflag;
+static int timestamp_dynfield_offset = -1;
+
 struct queue_stat {
 	volatile unsigned long pkts;
 	volatile unsigned long bytes;
@@ -265,9 +268,11 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 
 		mbuf->pkt_len = (uint16_t)header.caplen;
-		mbuf->timestamp = (uint64_t)header.ts.tv_sec * 1000000
-			+ header.ts.tv_usec;
-		mbuf->ol_flags |= PKT_RX_TIMESTAMP;
+		*RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset,
+			rte_mbuf_timestamp_t *) =
+				(uint64_t)header.ts.tv_sec * 1000000
+				+ header.ts.tv_usec;
+		mbuf->ol_flags |= timestamp_rx_dynflag;
 		mbuf->port = pcap_q->port_id;
 		bufs[num_rx] = mbuf;
 		num_rx++;
@@ -381,7 +386,7 @@ eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		return 0;
 
 	for (i = 0; i < nb_pkts; i++) {
-		tx_bytes += bufs[i]->data_len;
+		tx_bytes += bufs[i]->pkt_len;
 		rte_pktmbuf_free(bufs[i]);
 	}
 
@@ -607,7 +612,7 @@ status_up:
  * Is the only place for us to close all the tx streams dumpers.
  * If not called the dumpers will be flushed within each tx burst.
  */
-static void
+static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	unsigned int i;
@@ -649,6 +654,8 @@ status_down:
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+	return 0;
 }
 
 static int
@@ -728,6 +735,17 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static inline void
+infinite_rx_ring_free(struct rte_ring *pkts)
+{
+	struct rte_mbuf *bufs;
+
+	while (!rte_ring_dequeue(pkts, (void **)&bufs))
+		rte_pktmbuf_free(bufs);
+
+	rte_ring_free(pkts);
+}
+
 static int
 eth_dev_close(struct rte_eth_dev *dev)
 {
@@ -746,13 +764,15 @@ eth_dev_close(struct rte_eth_dev *dev)
 	if (internals->infinite_rx) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			struct pcap_rx_queue *pcap_q = &internals->rx_queue[i];
-			struct rte_mbuf *pcap_buf;
 
-			while (!rte_ring_dequeue(pcap_q->pkts,
-					(void **)&pcap_buf))
-				rte_pktmbuf_free(pcap_buf);
+			/*
+			 * 'pcap_q->pkts' can be NULL if 'eth_dev_close()'
+			 * called before 'eth_rx_queue_setup()' has been called
+			 */
+			if (pcap_q->pkts == NULL)
+				continue;
 
-			rte_ring_free(pcap_q->pkts);
+			infinite_rx_ring_free(pcap_q->pkts);
 		}
 	}
 
@@ -821,21 +841,25 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 		while (eth_pcap_rx(pcap_q, bufs, 1)) {
 			/* Check for multiseg mbufs. */
 			if (bufs[0]->nb_segs != 1) {
-				rte_pktmbuf_free(*bufs);
-
-				while (!rte_ring_dequeue(pcap_q->pkts,
-						(void **)bufs))
-					rte_pktmbuf_free(*bufs);
-
-				rte_ring_free(pcap_q->pkts);
-				PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx "
-						"mode.");
+				infinite_rx_ring_free(pcap_q->pkts);
+				PMD_LOG(ERR,
+					"Multiseg mbufs are not supported in infinite_rx mode.");
 				return -EINVAL;
 			}
 
 			rte_ring_enqueue_bulk(pcap_q->pkts,
 					(void * const *)bufs, 1, NULL);
 		}
+
+		if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
+			infinite_rx_ring_free(pcap_q->pkts);
+			PMD_LOG(ERR,
+				"Not enough mbufs to accommodate packets in pcap file. "
+				"At least %" PRIu64 " mbufs per queue is required.",
+				pcap_pkt_count);
+			return -EINVAL;
+		}
+
 		/*
 		 * Reset the stats for this queue since eth_pcap_rx calls above
 		 * didn't result in the application receiving packets.
 		 */
@@ -1149,6 +1173,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
 	data->mac_addrs = &(*internals)->eth_addr;
 	data->promiscuous = 1;
 	data->all_multicast = 1;
+	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	/*
 	 * NOTE: we'll replace the data element, of originally allocated
@@ -1309,9 +1334,8 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
 
 		/* phy_mac arg is applied only only if "iface" devarg is provided */
 		if (rx_queues->phy_mac) {
-			int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
-					eth_dev, vdev->device.numa_node);
-			if (ret == 0)
+			if (eth_pcap_update_mac(rx_queues->queue[0].name,
+					eth_dev, vdev->device.numa_node) == 0)
 				internals->phy_mac = 1;
 		}
 	}
@@ -1363,6 +1387,13 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
 	start_cycles = rte_get_timer_cycles();
 	hz = rte_get_timer_hz();
 
+	ret = rte_mbuf_dyn_rx_timestamp_register(&timestamp_dynfield_offset,
+			&timestamp_rx_dynflag);
+	if (ret != 0) {
+		PMD_LOG(ERR, "Failed to register Rx timestamp field/flag");
+		return -1;
+	}
+
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		eth_dev = rte_eth_dev_attach_secondary(name);
 		if (!eth_dev) {
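
Note on the timestamp change above: instead of writing the old static mbuf timestamp field and setting PKT_RX_TIMESTAMP, the PMD now registers the shared dynamic Rx timestamp field and flag once in pmd_pcap_probe() and writes through the returned offset in eth_pcap_rx(). A minimal sketch of the same pattern outside the pcap driver follows; the names ts_offset, ts_rx_flag, register_rx_timestamp() and set_rx_timestamp() are illustrative only, while the rte_mbuf_dyn calls are the ones used in the diff.

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Illustrative driver-local state, mirroring timestamp_dynfield_offset and
 * timestamp_rx_dynflag in the patch above. */
static int ts_offset = -1;   /* byte offset of the dynamic timestamp field */
static uint64_t ts_rx_flag;  /* ol_flags bit meaning "Rx timestamp is valid" */

/* Call once at probe time, before the first Rx burst. */
static int
register_rx_timestamp(void)
{
	/* Registers (or looks up) the shared Rx timestamp field and flag. */
	return rte_mbuf_dyn_rx_timestamp_register(&ts_offset, &ts_rx_flag);
}

/* Call per packet in the Rx path once a timestamp value is known. */
static inline void
set_rx_timestamp(struct rte_mbuf *m, rte_mbuf_timestamp_t ts)
{
	*RTE_MBUF_DYNFIELD(m, ts_offset, rte_mbuf_timestamp_t *) = ts;
	m->ol_flags |= ts_rx_flag;
}

An application that wants to read the timestamp looks the field up by name (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME) with rte_mbuf_dynfield_lookup() and tests the Rx timestamp flag bit in ol_flags before dereferencing the field.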