git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
net: add rte prefix to ether defines
[dpdk.git]
/
drivers
/
net
/
virtio
/
virtqueue.h
diff --git
a/drivers/net/virtio/virtqueue.h
b/drivers/net/virtio/virtqueue.h
index
48b3912..c6dd4a3
100644
(file)
--- a/
drivers/net/virtio/virtqueue.h
+++ b/
drivers/net/virtio/virtqueue.h
@@
-134,7
+134,7
@@
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
*/
struct virtio_net_ctrl_mac {
uint32_t entries;
*/
struct virtio_net_ctrl_mac {
uint32_t entries;
- uint8_t macs[][ETHER_ADDR_LEN];
+ uint8_t macs[][RTE_ETHER_ADDR_LEN];
} __attribute__((__packed__));
#define VIRTIO_NET_CTRL_MAC 1
} __attribute__((__packed__));
#define VIRTIO_NET_CTRL_MAC 1
@@
-277,12
+277,8
@@
struct virtio_net_hdr_mrg_rxbuf {
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
- union {
- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
- __attribute__((__aligned__(16)));
- struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
- __attribute__((__aligned__(16)));
- };
+ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
};
static inline int
};
static inline int
@@
-291,8
+287,8
@@
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
uint16_t used, avail, flags;
flags = desc->flags;
uint16_t used, avail, flags;
flags = desc->flags;
- used = !!(flags & VRING_DESC_F_USED(1));
- avail = !!(flags & VRING_DESC_F_AVAIL(1));
+ used = !!(flags & VRING_PACKED_DESC_F_USED);
+ avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
return avail == used && used == vq->vq_packed.used_wrap_counter;
}
return avail == used && used == vq->vq_packed.used_wrap_counter;
}
@@
-302,10
+298,10
@@
vring_desc_init_packed(struct virtqueue *vq, int n)
{
int i;
for (i = 0; i < n - 1; i++) {
{
int i;
for (i = 0; i < n - 1; i++) {
- vq->vq_packed.ring.desc_packed[i].id = i;
+ vq->vq_packed.ring.desc[i].id = i;
vq->vq_descx[i].next = i + 1;
}
vq->vq_descx[i].next = i + 1;
}
- vq->vq_packed.ring.desc_packed[i].id = i;
+ vq->vq_packed.ring.desc[i].id = i;
vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}
vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}
@@
-321,18
+317,27
@@
vring_desc_init_split(struct vring_desc *dp, uint16_t n)
}
/**
}
/**
- * Tell the backend not to interrupt us.
+ * Tell the backend not to interrupt us. Implementation for packed virtqueues.
*/
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
*/
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
- vq->vq_packed.ring.driver_event->desc_event_flags =
+ vq->vq_packed.ring.driver->desc_event_flags =
vq->vq_packed.event_flags_shadow;
}
}
vq->vq_packed.event_flags_shadow;
}
}
+/**
+ * Tell the backend not to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_split(struct virtqueue *vq)
+{
+ vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
/**
* Tell the backend not to interrupt us.
*/
/**
* Tell the backend not to interrupt us.
*/
@@
-342,7
+347,7
@@
virtqueue_disable_intr(struct virtqueue *vq)
if (vtpci_packed_queue(vq->hw))
virtqueue_disable_intr_packed(vq);
else
if (vtpci_packed_queue(vq->hw))
virtqueue_disable_intr_packed(vq);
else
- vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ virtqueue_disable_intr_split(vq);
}
/**
}
/**
@@
-353,7
+358,7
@@
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
{
if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
- vq->vq_packed.ring.driver_event->desc_event_flags =
+ vq->vq_packed.ring.driver->desc_event_flags =
vq->vq_packed.event_flags_shadow;
}
}
vq->vq_packed.event_flags_shadow;
}
}
@@
-460,7
+465,7
@@
virtqueue_kick_prepare_packed(struct virtqueue *vq)
* Ensure updated data is visible to vhost before reading the flags.
*/
virtio_mb(vq->hw->weak_barriers);
* Ensure updated data is visible to vhost before reading the flags.
*/
virtio_mb(vq->hw->weak_barriers);
- flags = vq->vq_packed.ring.device_event->desc_event_flags;
+ flags = vq->vq_packed.ring.device->desc_event_flags;
return flags != RING_EVENT_FLAGS_DISABLE;
}
return flags != RING_EVENT_FLAGS_DISABLE;
}