static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int virtio_intr_enable(struct rte_eth_dev *dev);
static int virtio_intr_disable(struct rte_eth_dev *dev);
static int virtio_dev_queue_stats_mapping_set(
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
static struct virtio_pmd_ctrl *
-virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
{
struct virtqueue *vq = cvq->vq;
int head;
- struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct virtio_pmd_ctrl *result;
- int wrap_counter;
uint16_t flags;
int sum = 0;
+ int nb_descs = 0;
int k;
/*
* One RX packet for ACK.
*/
head = vq->vq_avail_idx;
- wrap_counter = vq->avail_wrap_counter;
- desc[head].flags = VRING_DESC_F_NEXT;
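+ /* The head descriptor's flags are applied last (after a barrier),
+  * so snapshot the current avail/used flag bits here.
+  */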
+ flags = vq->vq_packed.cached_flags;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
+ nb_descs++;
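+ /* Crossing the ring boundary flips the cached avail/used flag bits
+  * (the packed ring wrap counter).
+  */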
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
+ vq->vq_packed.cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
for (k = 0; k < pkt_num; k++) {
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
- flags = VRING_DESC_F_NEXT;
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+ vq->vq_packed.cached_flags;
sum += dlen[k];
vq->vq_free_cnt--;
- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- desc[vq->vq_avail_idx].flags = flags;
- rte_smp_wmb();
- vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
+ vq->vq_packed.cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
-
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- flags = VRING_DESC_F_WRITE;
- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- desc[vq->vq_avail_idx].flags = flags;
- flags = VRING_DESC_F_NEXT;
- flags |= VRING_DESC_F_AVAIL(wrap_counter) |
- VRING_DESC_F_USED(!wrap_counter);
- desc[head].flags = flags;
- rte_smp_wmb();
-
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+ vq->vq_packed.cached_flags;
vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
+ vq->vq_packed.cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
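+ /* The chain is fully built; make it visible before flipping the
+  * head descriptor's flags, which hands the whole chain to the device.
+  */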
+ virtio_wmb(vq->hw->weak_barriers);
+ desc[head].flags = VRING_DESC_F_NEXT | flags;
+
+ virtio_wmb(vq->hw->weak_barriers);
virtqueue_notify(vq);
/* wait for used descriptors in virtqueue */
- do {
- rte_rmb();
+ while (!desc_is_used(&desc[head], vq))
usleep(100);
- } while (!desc_is_used(&desc[head], vq));
+
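+ /* Make the device's writes (the status byte) visible before reading them. */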
+ virtio_rmb(vq->hw->weak_barriers);
/* now get used descriptors */
- while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
- vq->vq_free_cnt++;
- if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
- }
- }
+ vq->vq_free_cnt += nb_descs;
+ vq->vq_used_cons_idx += nb_descs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->vq_packed.used_wrap_counter ^= 1;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+ "vq->vq_avail_idx=%d\n"
+ "vq->vq_used_cons_idx=%d\n"
+ "vq->vq_packed.cached_flags=0x%x\n"
+ "vq->vq_packed.used_wrap_counter=%d\n",
+ vq->vq_free_cnt,
+ vq->vq_avail_idx,
+ vq->vq_used_cons_idx,
+ vq->vq_packed.cached_flags,
+ vq->vq_packed.used_wrap_counter);
result = cvq->virtio_net_hdr_mz->addr;
return result;
}
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
{
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq = cvq->vq;
uint32_t head, i;
int k, sum = 0;
- virtio_net_ctrl_ack status = ~0;
- struct virtio_pmd_ctrl *result;
- struct virtqueue *vq;
-
- ctrl->status = status;
- if (!cvq || !cvq->vq) {
- PMD_INIT_LOG(ERR, "Control queue is not supported.");
- return -1;
- }
-
- rte_spinlock_lock(&cvq->lock);
- vq = cvq->vq;
head = vq->vq_desc_head_idx;
- PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
- "vq->hw->cvq = %p vq = %p",
- vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
-
- if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
- rte_spinlock_unlock(&cvq->lock);
- return -1;
- }
-
- memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
- sizeof(struct virtio_pmd_ctrl));
-
- if (vtpci_packed_queue(vq->hw)) {
- result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
- goto out_unlock;
- }
-
/*
* Format is enforced in qemu code:
* One TX packet for header;
* At least one TX packet per argument;
* One RX packet for ACK.
*/
- vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
- vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
- i = vq->vq_ring.desc[head].next;
+ i = vq->vq_split.ring.desc[head].next;
for (k = 0; k < pkt_num; k++) {
- vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
- vq->vq_ring.desc[i].len = dlen[k];
+ vq->vq_split.ring.desc[i].len = dlen[k];
sum += dlen[k];
vq->vq_free_cnt--;
- i = vq->vq_ring.desc[i].next;
+ i = vq->vq_split.ring.desc[i].next;
}
- vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+ vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
vq->vq_free_cnt--;
- vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+ vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
vq_update_avail_ring(vq, head);
vq_update_avail_idx(vq);
used_idx = (uint32_t)(vq->vq_used_cons_idx
& (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
idx = (uint32_t) uep->id;
desc_idx = idx;
- while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
- desc_idx = vq->vq_ring.desc[desc_idx].next;
+ while (vq->vq_split.ring.desc[desc_idx].flags &
+ VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_split.ring.desc[desc_idx].next;
vq->vq_free_cnt++;
}
- vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
vq->vq_desc_head_idx = idx;
vq->vq_used_cons_idx++;
vq->vq_free_cnt, vq->vq_desc_head_idx);
result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
+static int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ virtio_net_ctrl_ack status = ~0;
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq;
+
+ ctrl->status = status;
+
+ if (!cvq || !cvq->vq) {
+ PMD_INIT_LOG(ERR, "Control queue is not supported.");
+ return -1;
+ }
+
+ rte_spinlock_lock(&cvq->lock);
+ vq = cvq->vq;
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+ "vq->hw->cvq = %p vq = %p",
+ vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+ if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ rte_spinlock_unlock(&cvq->lock);
+ return -1;
+ }
+
+ memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+ sizeof(struct virtio_pmd_ctrl));
+
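+ /* Dispatch to the packed or split ring variant; both return a pointer
+  * to the control header (with the device-updated status) in the memzone.
+  */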
+ if (vtpci_packed_queue(vq->hw))
+ result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+ else
+ result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
-out_unlock:
rte_spinlock_unlock(&cvq->lock);
return result->status;
}
virtio_init_vring(struct virtqueue *vq)
{
int size = vq->vq_nentries;
- struct vring *vr = &vq->vq_ring;
uint8_t *ring_mem = vq->vq_ring_virt_mem;
PMD_INIT_FUNC_TRACE();
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (vtpci_packed_queue(vq->hw)) {
- vring_init_packed(&vq->ring_packed, ring_mem,
+ vring_init_packed(&vq->vq_packed.ring, ring_mem,
VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
+ struct vring *vr = &vq->vq_split.ring;
+
vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
- vq->event_flags_shadow = 0;
if (vtpci_packed_queue(hw)) {
- vq->avail_wrap_counter = 1;
- vq->used_wrap_counter = 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
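+ /* The packed ring starts on its first pass: wrap counter is 1 and
+  * the avail bit is set in the cached flags; Rx descriptors are also
+  * device-writable.
+  */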
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_DESC_F_AVAIL(1);
+ vq->vq_packed.event_flags_shadow = 0;
+ if (queue_type == VTNET_RQ)
+ vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
}
/*
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
- struct vring_packed_desc *start_dp_packed =
- txr[i].tx_indir_pq;
/* first indirect descriptor is always the tx header */
- if (vtpci_packed_queue(hw)) {
- start_dp_packed->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region,
- tx_hdr);
- start_dp_packed->len = hw->vtnet_hdr_size;
- } else {
+ if (!vtpci_packed_queue(hw)) {
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
start_dp->addr = txvq->virtio_net_hdr_mem
static int
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
struct virtqueue *vq = rxvq->vq;
virtqueue_enable_intr(vq);
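+ /* Full barrier: ensure the interrupt enabling is visible before the
+  * ring is checked again, so a completion is not missed.
+  */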
+ virtio_mb(hw->weak_barriers);
return 0;
}
.mac_addr_set = virtio_mac_addr_set,
};
+/*
+ * dev_ops for virtio-user in secondary processes, as we currently
+ * have only limited support.
+ */
+const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
+ .dev_infos_get = virtio_dev_info_get,
+ .stats_get = virtio_dev_stats_get,
+ .xstats_get = virtio_dev_xstats_get,
+ .xstats_get_names = virtio_dev_xstats_get_names,
+ .stats_reset = virtio_dev_stats_reset,
+ .xstats_reset = virtio_dev_stats_reset,
+ /* collect stats per queue */
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
if (vtpci_packed_queue(hw)) {
PMD_INIT_LOG(INFO,
- "virtio: using packed ring standard Tx path on port %u",
+ "virtio: using packed ring %s Tx path on port %u",
+ hw->use_inorder_tx ? "inorder" : "standard",
eth_dev->data->port_id);
eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
} else {
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
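+ /* Weak (smp) barriers suffice unless the device negotiated
+  * VIRTIO_F_ORDER_PLATFORM, which requires real platform ordering.
+  */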
+ hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
if (!hw->virtio_user_dev) {
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ hw->vtnet_hdr_size;
uint64_t rx_offloads = rxmode->offloads;
uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
return ret;
}
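+ /* If the requested max Rx packet length exceeds what the device MTU
+  * can cover, don't negotiate VIRTIO_NET_F_MTU.
+  */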
+ if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
if (vtpci_packed_queue(hw)) {
hw->use_simple_rx = 0;
hw->use_inorder_rx = 0;
- hw->use_inorder_tx = 0;
}
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
host_features = VTPCI_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |