ethdev: use constants for link state
[dpdk.git] / drivers/net/virtio/virtio_ethdev.c
index ebefdb4..3ebc221 100644
@@ -146,9 +146,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
        ctrl->status = status;
 
        if (!(vq && vq->hw->cvq)) {
-               PMD_INIT_LOG(ERR,
-                            "%s(): Control queue is not supported.",
-                            __func__);
+               PMD_INIT_LOG(ERR, "Control queue is not supported.");
                return -1;
        }
        head = vq->vq_desc_head_idx;
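
Dropping __func__ from these call sites loses nothing: the virtio PMD's log macro already prefixes every message with the calling function, so passing __func__ explicitly printed the name twice. A minimal sketch of that macro as defined in virtio_logs.h (reproduced from memory, so treat the exact form as an assumption):

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT
/* PMD_INIT_LOG() already emits "%s(): " with __func__ before the message */
#define PMD_INIT_LOG(level, fmt, args...) \
        RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
#else
#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
#endif
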
@@ -298,12 +296,12 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
        vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
        PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
        if (vq_size == 0) {
-               PMD_INIT_LOG(ERR, "%s: virtqueue does not exist", __func__);
+               PMD_INIT_LOG(ERR, "virtqueue does not exist");
                return -EINVAL;
        }
 
        if (!rte_is_power_of_2(vq_size)) {
-               PMD_INIT_LOG(ERR, "%s: virtqueue size is not powerof 2", __func__);
+               PMD_INIT_LOG(ERR, "virtqueue size is not a power of 2");
                return -EINVAL;
        }
 
@@ -328,12 +326,11 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
                        RTE_CACHE_LINE_SIZE);
        }
        if (vq == NULL) {
-               PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
+               PMD_INIT_LOG(ERR, "Can not allocate virtqueue");
                return -ENOMEM;
        }
        if (queue_type == VTNET_RQ && vq->sw_ring == NULL) {
-               PMD_INIT_LOG(ERR, "%s: Can not allocate RX soft ring",
-                       __func__);
+               PMD_INIT_LOG(ERR, "Can not allocate RX soft ring");
                rte_free(vq);
                return -ENOMEM;
        }
@@ -387,27 +384,47 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
        vq->virtio_net_hdr_mem = 0;
 
        if (queue_type == VTNET_TQ) {
+               const struct rte_memzone *hdr_mz;
+               struct virtio_tx_region *txr;
+               unsigned int i;
+
                /*
                 * For each xmit packet, allocate a virtio_net_hdr
+                * and indirect ring elements
                 */
                snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
-                       dev->data->port_id, queue_idx);
-               vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
-                       vq_size * hw->vtnet_hdr_size,
-                       socket_id, 0, RTE_CACHE_LINE_SIZE);
-               if (vq->virtio_net_hdr_mz == NULL) {
+                        dev->data->port_id, queue_idx);
+               hdr_mz = rte_memzone_reserve_aligned(vq_name,
+                                                    vq_size * sizeof(*txr),
+                                                    socket_id, 0,
+                                                    RTE_CACHE_LINE_SIZE);
+               if (hdr_mz == NULL) {
                        if (rte_errno == EEXIST)
-                               vq->virtio_net_hdr_mz =
-                                       rte_memzone_lookup(vq_name);
-                       if (vq->virtio_net_hdr_mz == NULL) {
+                               hdr_mz = rte_memzone_lookup(vq_name);
+                       if (hdr_mz == NULL) {
                                rte_free(vq);
                                return -ENOMEM;
                        }
                }
-               vq->virtio_net_hdr_mem =
-                       vq->virtio_net_hdr_mz->phys_addr;
-               memset(vq->virtio_net_hdr_mz->addr, 0,
-                       vq_size * hw->vtnet_hdr_size);
+               vq->virtio_net_hdr_mz = hdr_mz;
+               vq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+
+               txr = hdr_mz->addr;
+               memset(txr, 0, vq_size * sizeof(*txr));
+               for (i = 0; i < vq_size; i++) {
+                       struct vring_desc *start_dp = txr[i].tx_indir;
+
+                       vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
+
+                       /* first indirect descriptor is always the tx header */
+                       start_dp->addr = vq->virtio_net_hdr_mem
+                               + i * sizeof(*txr)
+                               + offsetof(struct virtio_tx_region, tx_hdr);
+
+                       start_dp->len = vq->hw->vtnet_hdr_size;
+                       start_dp->flags = VRING_DESC_F_NEXT;
+               }
+
        } else if (queue_type == VTNET_CQ) {
                /* Allocate a page for control vq command, data and status */
                snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
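
The TX hunk above replaces the flat array of virtio_net_hdr structures with an array of per-slot regions: each slot carries one tx header followed by a small indirect descriptor table whose first entry permanently points back at that header inside the same memzone. Below is a self-contained sketch of that layout and of the initialization loop; the stand-in definitions are illustrative only (the real struct virtio_tx_region, struct vring_desc and vring_desc_init() live in virtqueue.h and virtio_ring.h, and VIRTIO_MAX_TX_INDIRECT is assumed here).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define VRING_DESC_F_NEXT       1
#define VIRTIO_MAX_TX_INDIRECT  8       /* assumed table size, illustrative */

/* simplified stand-ins for the definitions in virtio_ring.h / virtqueue.h */
struct vring_desc {
        uint64_t addr;          /* guest-physical address of the buffer */
        uint32_t len;
        uint16_t flags;
        uint16_t next;
};

struct virtio_net_hdr_mrg_rxbuf {
        uint8_t bytes[12];      /* header contents do not matter for the layout */
};

struct virtio_tx_region {
        struct virtio_net_hdr_mrg_rxbuf tx_hdr;
        struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
};

/*
 * Mirror of the loop in the hunk above: chain each slot's indirect
 * descriptors and make descriptor 0 point at that slot's tx header.
 */
void
tx_region_init(struct virtio_tx_region *txr, uint64_t txr_phys,
               unsigned int vq_size, uint32_t vtnet_hdr_size)
{
        unsigned int i, j;

        memset(txr, 0, vq_size * sizeof(*txr));
        for (i = 0; i < vq_size; i++) {
                struct vring_desc *dp = txr[i].tx_indir;

                /* roughly what vring_desc_init() does: link 0 -> 1 -> ... */
                for (j = 0; j < VIRTIO_MAX_TX_INDIRECT - 1; j++)
                        dp[j].next = (uint16_t)(j + 1);

                /* first indirect descriptor is always the tx header */
                dp[0].addr  = txr_phys + i * sizeof(*txr)
                              + offsetof(struct virtio_tx_region, tx_hdr);
                dp[0].len   = vtnet_hdr_size;
                dp[0].flags = VRING_DESC_F_NEXT;
        }
}

With this layout the transmit path can post a whole packet through a single ring slot by pointing it at tx_indir with VRING_DESC_F_INDIRECT, header included.
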
@@ -1393,16 +1410,16 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
                                offsetof(struct virtio_net_config, status),
                                &status, sizeof(status));
                if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
-                       link.link_status = 0;
+                       link.link_status = ETH_LINK_DOWN;
                        PMD_INIT_LOG(DEBUG, "Port %d is down",
                                     dev->data->port_id);
                } else {
-                       link.link_status = 1;
+                       link.link_status = ETH_LINK_UP;
                        PMD_INIT_LOG(DEBUG, "Port %d is up",
                                     dev->data->port_id);
                }
        } else {
-               link.link_status = 1;   /* Link up */
+               link.link_status = ETH_LINK_UP;
        }
        virtio_dev_atomic_write_link_status(dev, &link);
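
ETH_LINK_UP and ETH_LINK_DOWN come from rte_ethdev.h and simply give names to the 1/0 values this function used to hard-code. A stand-alone sketch of the mapping the hunk converges on (the defines are repeated only so the snippet compiles on its own; link_state_from_status() is a hypothetical helper, not driver code):

#include <stdint.h>

#define ETH_LINK_DOWN           0       /* as named in rte_ethdev.h */
#define ETH_LINK_UP             1
#define VIRTIO_NET_S_LINK_UP    1       /* status bit in virtio_net_config */

/* hypothetical helper: translate the device status field into the
 * generic ethdev link state instead of raw 0/1 literals */
static inline uint16_t
link_state_from_status(uint8_t status)
{
        return (status & VIRTIO_NET_S_LINK_UP) ? ETH_LINK_UP : ETH_LINK_DOWN;
}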