diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 32ad27fe4e..a202931e9a 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -8,8 +8,8 @@
 #include 
 #include 
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
 #include 
 #include 
 #include 
@@ -19,7 +19,7 @@
 
 #include "rte_eth_vhost.h"
 
-RTE_LOG_REGISTER(vhost_logtype, pmd.net.vhost, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(vhost_logtype, NOTICE);
 
 #define VHOST_LOG(level, ...) \
 	rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
@@ -29,7 +29,6 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
 #define ETH_VHOST_IFACE_ARG "iface"
 #define ETH_VHOST_QUEUES_ARG "queues"
 #define ETH_VHOST_CLIENT_ARG "client"
-#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
 #define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
 #define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
 #define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
@@ -41,7 +40,6 @@ static const char *valid_arguments[] = {
 	ETH_VHOST_IFACE_ARG,
 	ETH_VHOST_QUEUES_ARG,
 	ETH_VHOST_CLIENT_ARG,
-	ETH_VHOST_DEQUEUE_ZERO_COPY,
 	ETH_VHOST_IOMMU_SUPPORT,
 	ETH_VHOST_POSTCOPY_SUPPORT,
 	ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
@@ -73,6 +71,9 @@ enum vhost_xstats_pkts {
 	VHOST_BROADCAST_PKT,
 	VHOST_MULTICAST_PKT,
 	VHOST_UNICAST_PKT,
+	VHOST_PKT,
+	VHOST_BYTE,
+	VHOST_MISSED_PKT,
 	VHOST_ERRORS_PKT,
 	VHOST_ERRORS_FRAGMENTED,
 	VHOST_ERRORS_JABBER,
@@ -149,11 +150,11 @@ struct vhost_xstats_name_off {
 /* [rx]_is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -189,11 +190,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 /* [tx]_ is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -287,23 +288,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	if (n < nxstats)
 		return nxstats;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		vq = dev->data->rx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-			- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-			+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		vq = dev->data->tx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = 
vq->stats.pkts - + vq->stats.missed_pkts - - (vq->stats.xstats[VHOST_BROADCAST_PKT] - + vq->stats.xstats[VHOST_MULTICAST_PKT]); - } for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) { xstats[count].value = 0; for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -334,7 +318,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, } static inline void -vhost_count_multicast_broadcast(struct vhost_queue *vq, +vhost_count_xcast_packets(struct vhost_queue *vq, struct rte_mbuf *mbuf) { struct rte_ether_addr *ea = NULL; @@ -346,20 +330,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq, pstats->xstats[VHOST_BROADCAST_PKT]++; else pstats->xstats[VHOST_MULTICAST_PKT]++; + } else { + pstats->xstats[VHOST_UNICAST_PKT]++; } } static void -vhost_update_packet_xstats(struct vhost_queue *vq, - struct rte_mbuf **bufs, - uint16_t count) +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs, + uint16_t count, uint64_t nb_bytes, + uint64_t nb_missed) { uint32_t pkt_len = 0; uint64_t i = 0; uint64_t index; struct vhost_stats *pstats = &vq->stats; + pstats->xstats[VHOST_BYTE] += nb_bytes; + pstats->xstats[VHOST_MISSED_PKT] += nb_missed; + pstats->xstats[VHOST_UNICAST_PKT] += nb_missed; + for (i = 0; i < count ; i++) { + pstats->xstats[VHOST_PKT]++; pkt_len = bufs[i]->pkt_len; if (pkt_len == 64) { pstats->xstats[VHOST_64_PKT]++; @@ -375,7 +366,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq, else if (pkt_len > 1522) pstats->xstats[VHOST_1523_TO_MAX_PKT]++; } - vhost_count_multicast_broadcast(vq, bufs[i]); + vhost_count_xcast_packets(vq, bufs[i]); } } @@ -385,6 +376,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) struct vhost_queue *r = q; uint16_t i, nb_rx = 0; uint16_t nb_receive = nb_bufs; + uint64_t nb_bytes = 0; if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) return 0; @@ -419,10 +411,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) if (r->internal->vlan_strip) rte_vlan_strip(bufs[i]); - r->stats.bytes += bufs[i]->pkt_len; + nb_bytes += bufs[i]->pkt_len; } - vhost_update_packet_xstats(r, bufs, nb_rx); + r->stats.bytes += nb_bytes; + vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0); out: rte_atomic32_set(&r->while_queuing, 0); @@ -436,6 +429,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) struct vhost_queue *r = q; uint16_t i, nb_tx = 0; uint16_t nb_send = 0; + uint64_t nb_bytes = 0; + uint64_t nb_missed = 0; if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) return 0; @@ -476,20 +471,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) break; } + for (i = 0; likely(i < nb_tx); i++) + nb_bytes += bufs[i]->pkt_len; + + nb_missed = nb_bufs - nb_tx; + r->stats.pkts += nb_tx; + r->stats.bytes += nb_bytes; r->stats.missed_pkts += nb_bufs - nb_tx; - for (i = 0; likely(i < nb_tx); i++) - r->stats.bytes += bufs[i]->pkt_len; - - vhost_update_packet_xstats(r, bufs, nb_tx); + vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed); - /* According to RFC2863 page42 section ifHCOutMulticastPkts and - * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast" - * are increased when packets are not transmitted successfully. + /* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and + * ifHCOutBroadcastPkts counters are increased when packets are not + * transmitted successfully. 
 	 */
 	for (i = nb_tx; i < nb_bufs; i++)
-		vhost_count_multicast_broadcast(r, bufs[i]);
+		vhost_count_xcast_packets(r, bufs[i]);
 
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
@@ -1155,13 +1153,16 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
-static void
+static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	struct pmd_internal *internal = dev->data->dev_private;
 
+	dev->data->dev_started = 0;
 	rte_atomic32_set(&internal->started, 0);
 	update_queuing_status(dev);
+
+	return 0;
 }
 
 static int
@@ -1169,7 +1170,7 @@ eth_dev_close(struct rte_eth_dev *dev)
 {
 	struct pmd_internal *internal;
 	struct internal_list *list;
-	unsigned int i;
+	unsigned int i, ret;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
@@ -1178,7 +1179,7 @@ eth_dev_close(struct rte_eth_dev *dev)
 	if (!internal)
 		return 0;
 
-	eth_dev_stop(dev);
+	ret = eth_dev_stop(dev);
 
 	list = find_internal_resource(internal->iface_name);
 	if (list) {
@@ -1205,7 +1206,7 @@ eth_dev_close(struct rte_eth_dev *dev)
 	rte_free(vring_states[dev->data->port_id]);
 	vring_states[dev->data->port_id] = NULL;
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -1446,7 +1447,8 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
 	internal->flags = flags;
 	internal->disable_flags = disable_flags;
 	data->dev_link = pmd_link;
-	data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+	data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+				RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 	data->promiscuous = 1;
 	data->all_multicast = 1;
 
@@ -1506,7 +1508,6 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
 	uint64_t flags = 0;
 	uint64_t disable_flags = 0;
 	int client_mode = 0;
-	int dequeue_zero_copy = 0;
 	int iommu_support = 0;
 	int postcopy_support = 0;
 	int tso = 0;
@@ -1566,16 +1567,6 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
 		flags |= RTE_VHOST_USER_CLIENT;
 	}
 
-	if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
-		ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
-					 &open_int, &dequeue_zero_copy);
-		if (ret < 0)
-			goto out_free;
-
-		if (dequeue_zero_copy)
-			flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
-	}
-
 	if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
 		ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
 					 &open_int, &iommu_support);
@@ -1675,7 +1666,6 @@ RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
 	"iface=<ifc> "
 	"queues=<int> "
 	"client=<0|1> "
-	"dequeue-zero-copy=<0|1> "
 	"iommu-support=<0|1> "
 	"postcopy-support=<0|1> "
 	"tso=<0|1> "
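
The xstats rework in this diff drops the fix-up loops at the top of vhost_dev_xstats_get() (which derived unicast counts as packets minus broadcast minus multicast) and instead classifies every mbuf as it passes through the Rx/Tx burst functions, while per-burst byte and missed totals are handed to vhost_update_packet_xstats(). The sketch below illustrates that counting scheme using only the public rte_mbuf/rte_ether API; the enum, counter array, and helper names (count_xcast_packet, count_tx_burst) are invented for the example and are not the driver's own structures.

/*
 * Illustrative sketch only -- not part of the patch above. It mirrors the
 * idea of the reworked accounting: classify each mbuf as unicast, multicast
 * or broadcast at burst time (as the renamed vhost_count_xcast_packets()
 * does) and accumulate byte/missed totals once per burst, rather than
 * deriving unicast counts from aggregate totals in the xstats callback.
 */
#include <stdint.h>

#include <rte_ether.h>
#include <rte_mbuf.h>

enum { XCAST_UNICAST, XCAST_MULTICAST, XCAST_BROADCAST, XCAST_NB };

static inline void
count_xcast_packet(uint64_t xcast[XCAST_NB], struct rte_mbuf *mbuf)
{
	/* The destination MAC is the first field of the Ethernet header. */
	struct rte_ether_addr *ea =
		rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);

	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			xcast[XCAST_BROADCAST]++;
		else
			xcast[XCAST_MULTICAST]++;
	} else {
		xcast[XCAST_UNICAST]++;
	}
}

static inline void
count_tx_burst(uint64_t xcast[XCAST_NB], uint64_t *bytes, uint64_t *missed,
	       struct rte_mbuf **bufs, uint16_t nb_sent, uint16_t nb_bufs)
{
	uint16_t i;

	for (i = 0; i < nb_sent; i++) {
		/* Per-burst byte total and per-packet cast classification. */
		*bytes += bufs[i]->pkt_len;
		count_xcast_packet(xcast, bufs[i]);
	}
	/* Packets the backend could not accept are accounted as missed. */
	*missed += (uint64_t)(nb_bufs - nb_sent);
}

Doing the classification while bursts are processed keeps the xstats callback a plain read of per-queue counters, which is what allows the two derivation loops to be deleted from vhost_dev_xstats_get() in this patch.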