/*-
* BSD LICENSE
- *
+ *
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
- *
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- *
+ *
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
- *
+ *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#define VIRTIO_NET_CTRL_RX_ALLUNI 2
#define VIRTIO_NET_CTRL_RX_NOMULTI 3
#define VIRTIO_NET_CTRL_RX_NOUNI 4
-#define VIRTIO_NET_CTRL_RX_NOBCAST 5
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
/**
* Control VLAN filtering
#define VIRTIO_NET_CTRL_VLAN_ADD 0
#define VIRTIO_NET_CTRL_VLAN_DEL 1
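+/*
+ * Header that starts every command placed on the control virtqueue:
+ * "class" selects the command family (RX mode, VLAN, MQ, ...) and
+ * "cmd" the operation within that family.
+ */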
+struct virtio_net_ctrl_hdr {
+ uint8_t class;
+ uint8_t cmd;
+} __attribute__((packed));
+
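+/* One-byte status the device writes back: VIRTIO_NET_OK or VIRTIO_NET_ERR. */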
+typedef uint8_t virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
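+/* Room reserved in virtio_pmd_ctrl for the command payload. */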
+#define VIRTIO_MAX_CTRL_DATA 128
+
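+/* A complete control command as the PMD assembles it: the header, the
+ * device-written status byte, and the command payload. */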
+struct virtio_pmd_ctrl {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ uint8_t data[VIRTIO_MAX_CTRL_DATA];
+};
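+/*
+ * Usage sketch (assuming the standard VIRTIO_NET_CTRL_RX class and
+ * VIRTIO_NET_CTRL_RX_PROMISC command definitions from the virtio spec):
+ *
+ *   struct virtio_pmd_ctrl ctrl;
+ *   ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ *   ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ *   ctrl.data[0] = 1;   (a single on/off byte: enable promiscuous mode)
+ *
+ * The command is then placed on the control virtqueue and the device
+ * writes VIRTIO_NET_OK or VIRTIO_NET_ERR into the status byte.
+ */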
+
struct virtqueue {
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
*/
uint16_t vq_used_cons_idx;
uint16_t vq_avail_idx;
- void *virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+
+ /* Statistics */
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
struct vq_desc_extra {
void *cookie;
} vq_descx[0];
};
+/* If multiqueue is provided by the host, then we support it. */
+#ifndef VIRTIO_NET_F_MQ
+/* Device supports Receive Flow Steering */
+#define VIRTIO_NET_F_MQ 0x400000
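+/* Control-queue class and command used to set the number of active
+ * queue pairs; the requested count must lie within the MIN/MAX bounds
+ * below. */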
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+#endif
+
/**
* This is the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header.
/**
* Get all mbufs to be freed.
*/
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
{
- return (vq->vq_free_cnt == 0);
+ return vq->vq_free_cnt == 0;
}
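+/* Entries in the used ring not yet consumed by the driver; the
+ * uint16_t cast keeps the subtraction correct across index wraparound. */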
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
-static inline void
+static inline void __attribute__((always_inline))
vq_update_avail_idx(struct virtqueue *vq)
{
rte_compiler_barrier();
vq->vq_ring.avail->idx = vq->vq_avail_idx;
}
-static inline void
+static inline void __attribute__((always_inline))
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx;
/*
* Place the head of the descriptor chain into the next slot and make
- * it usable to the host. We wait to inform the host until after the burst
- * is complete to avoid cache alignment issues with descriptors. This
- * also helps to avoid any contention on the available index.
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
*/
avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
vq->vq_avail_idx++;
}
-static inline int virtqueue_kick_prepare(struct virtqueue * vq)
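+/* Whether the host expects to be notified after new descriptors are queued
+ * (i.e. it has not set VRING_USED_F_NO_NOTIFY). */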
+static inline int __attribute__((always_inline))
+virtqueue_kick_prepare(struct virtqueue *vq)
{
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
-static inline void
+static inline void __attribute__((always_inline))
virtqueue_notify(struct virtqueue *vq)
{
- /*
+ /*
* Ensure the updated avail->idx is visible to the host. Is an mb() necessary?
* For virtio on IA, the notification is done through an I/O port operation,
* which is itself a serializing instruction.
VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);
}
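+/* Return the descriptor chain starting at desc_idx to the free list,
+ * walking VRING_DESC_F_NEXT links to find its tail. */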
-static inline void
+static inline void __attribute__((always_inline))
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp, *dp_tail;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT) {
- desc_idx_last = dp->next;
+ desc_idx_last = dp->next;
dp = &vq->vq_ring.desc[dp->next];
}
}
dxp->ndescs = 0;
-
+
/*
* We must append the existing free chain, if any, to the end of
* newly freed chain. If the virtqueue was completely used, then
dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
dp_tail->next = desc_idx;
}
+
vq->vq_desc_tail_idx = desc_idx_last;
dp->next = VQ_RING_DESC_CHAIN_END;
}
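+/* Post a single mbuf to the receive ring; consumes one free descriptor
+ * chain. */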
-static inline int
+static inline int __attribute__((always_inline))
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
- uint16_t needed;
+ uint16_t needed = 1;
uint16_t head_idx, idx;
- needed = 1;
if (unlikely(vq->vq_free_cnt == 0))
- return (-ENOSPC);
+ return -ENOSPC;
if (unlikely(vq->vq_free_cnt < needed))
- return (-EMSGSIZE);
+ return -EMSGSIZE;
head_idx = vq->vq_desc_head_idx;
if (unlikely(head_idx >= vq->vq_nentries))
- return (-EFAULT);
+ return -EFAULT;
idx = head_idx;
dxp = &vq->vq_descx[idx];
idx = start_dp[idx].next;
vq->vq_desc_head_idx = idx;
if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = idx;
+ vq->vq_desc_tail_idx = idx;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
vq_update_avail_ring(vq, head_idx);
- return (0);
+ return 0;
}
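+/* Enqueue one packet for transmission; the chain uses two descriptors,
+ * one for the virtio_net_hdr and one for the mbuf data. */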
-static inline int
+static inline int __attribute__((always_inline))
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
- uint16_t needed;
+ uint16_t needed = 2;
uint16_t head_idx, idx;
- needed = 2;
+
if (unlikely(txvq->vq_free_cnt == 0))
- return (-ENOSPC);
+ return -ENOSPC;
if (unlikely(txvq->vq_free_cnt < needed))
- return (-EMSGSIZE);
+ return -EMSGSIZE;
head_idx = txvq->vq_desc_head_idx;
- if (unlikely(head_idx >= txvq->vq_nentries))
- return (-EFAULT);
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
idx = head_idx;
dxp = &txvq->vq_descx[idx];
dxp->ndescs = needed;
start_dp = txvq->vq_ring.desc;
- start_dp[idx].addr = (uint64_t)(uintptr_t)txvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);
+ start_dp[idx].addr =
+ txvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);
start_dp[idx].len = sizeof(struct virtio_net_hdr);
start_dp[idx].flags = VRING_DESC_F_NEXT;
idx = start_dp[idx].next;
idx = start_dp[idx].next;
txvq->vq_desc_head_idx = idx;
if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- txvq->vq_desc_tail_idx = idx;
+ txvq->vq_desc_tail_idx = idx;
txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
vq_update_avail_ring(txvq, head_idx);
- return (0);
+ return 0;
}
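+/* Dequeue up to num packets from the used ring, storing the mbufs in
+ * rx_pkts and their lengths in len; returns how many were retrieved. */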
-static inline uint16_t
+static inline uint16_t __attribute__((always_inline))
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
{
struct vring_used_elem *uep;
uint16_t i;
/* Caller does the check */
- for (i = 0; i < num ; i ++) {
+ for (i = 0; i < num; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
uep = &vq->vq_ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
len[i] = uep->len;
cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
if (unlikely(cookie == NULL)) {
- PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
vq->vq_used_cons_idx);
break;
}
+
rte_prefetch0(cookie);
rte_packet_prefetch(cookie->pkt.data);
rx_pkts[i] = cookie;
vq_ring_free_chain(vq, desc_idx);
vq->vq_descx[desc_idx].cookie = NULL;
}
- return (i);
+
+ return i;
}
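+/* Reclaim one completed transmit descriptor chain from the used ring. */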
-static inline uint16_t
+static inline uint16_t __attribute__((always_inline))
virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
{
- struct vring_used_elem *uep;
- uint16_t used_idx, desc_idx;
+ struct vring_used_elem *uep;
+ uint16_t used_idx, desc_idx;
- used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t) uep->id;
- vq->vq_used_cons_idx++;
- vq_ring_free_chain(vq, desc_idx);
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t) uep->id;
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
- return 0;
+ return 0;
}
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
used_idx = (vq)->vq_ring.used->idx; \
PMD_INIT_LOG(DEBUG, \
"VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
- " avail.flags=0x%x; used.flags=0x%x\n", \
+ " avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
(vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
(vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \