net/virtio: add speed capability
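For reference, a minimal sketch (assumptions: EAL already initialized, port 0
is the probed virtio device, error handling omitted) of how an application
reads back the capability advertised here through the standard ethdev info
call:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_speed_capa(void)
    {
            struct rte_eth_dev_info dev_info;

            /* Fills dev_info, including the speed_capa bitmap set by the PMD. */
            rte_eth_dev_info_get(0, &dev_info);

            if (dev_info.speed_capa & ETH_LINK_SPEED_10G)
                    printf("port 0 advertises a 10G speed capability\n");
    }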
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d580257..4dc03b9 100644
@@ -630,7 +630,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -653,7 +653,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -676,7 +676,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -699,7 +699,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -723,7 +723,7 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        uint32_t frame_size = mtu + ether_hdr_len;
 
        if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
-               PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
+               PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
                        ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
                return -EINVAL;
        }
@@ -1233,7 +1233,7 @@ virtio_queues_bind_intr(struct rte_eth_dev *dev)
        uint32_t i;
        struct virtio_hw *hw = dev->data->dev_private;
 
-       PMD_INIT_LOG(INFO, "queue/interrupt binding\n");
+       PMD_INIT_LOG(INFO, "queue/interrupt binding");
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                dev->intr_handle->intr_vec[i] = i + 1;
                if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
@@ -1252,7 +1252,7 @@ virtio_queues_unbind_intr(struct rte_eth_dev *dev)
        uint32_t i;
        struct virtio_hw *hw = dev->data->dev_private;
 
-       PMD_INIT_LOG(INFO, "queue/interrupt unbinding\n");
+       PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
        for (i = 0; i < dev->data->nb_rx_queues; ++i)
                VTPCI_OPS(hw)->set_queue_irq(hw,
                                             hw->vqs[i * VTNET_CQ],
@@ -1461,9 +1461,12 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
 static void
 virtio_set_vtpci_ops(struct virtio_hw *hw)
 {
+#ifdef RTE_VIRTIO_USER
        if (hw->virtio_user_dev)
                VTPCI_OPS(hw) = &virtio_user_ops;
-       else if (hw->modern)
+       else
+#endif
+       if (hw->modern)
                VTPCI_OPS(hw) = &modern_ops;
        else
                VTPCI_OPS(hw) = &legacy_ops;
@@ -1512,6 +1515,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
                return -ENOMEM;
        }
 
+       hw->port_id = eth_dev->data->port_id;
        /* For virtio_user case the hw->virtio_user_dev is populated by
         * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
         */
@@ -1522,7 +1526,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
                        return ret;
        }
 
-       hw->port_id = eth_dev->data->port_id;
        eth_dev->data->dev_flags = dev_flags;
 
        /* reset device and negotiate default features */
@@ -1829,9 +1832,11 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 static void
 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       uint64_t tso_mask;
+       uint64_t tso_mask, host_features;
        struct virtio_hw *hw = dev->data->dev_private;
 
+       dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+
        dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
        dev_info->max_rx_queues =
                RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
@@ -1843,18 +1848,25 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->default_txconf = (struct rte_eth_txconf) {
                .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
        };
-       dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_LRO;
-       dev_info->tx_offload_capa = 0;
 
+       host_features = VTPCI_OPS(hw)->get_features(hw);
+       dev_info->rx_offload_capa = 0;
+       if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+               dev_info->rx_offload_capa |=
+                       DEV_RX_OFFLOAD_TCP_CKSUM |
+                       DEV_RX_OFFLOAD_UDP_CKSUM;
+       }
+       tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+               (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+       if ((host_features & tso_mask) == tso_mask)
+               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+       dev_info->tx_offload_capa = 0;
        if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
                dev_info->tx_offload_capa |=
                        DEV_TX_OFFLOAD_UDP_CKSUM |
                        DEV_TX_OFFLOAD_TCP_CKSUM;
        }
-
        tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
                (1ULL << VIRTIO_NET_F_HOST_TSO6);
        if ((hw->guest_features & tso_mask) == tso_mask)