struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
- struct virtqueue *vq = cvq->vq;
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
int head;
struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct virtio_pmd_ctrl *result;
int *dlen, int pkt_num)
{
struct virtio_pmd_ctrl *result;
- struct virtqueue *vq = cvq->vq;
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
uint32_t head, i;
int k, sum = 0;
ctrl->status = status;
- if (!cvq || !cvq->vq) {
+ if (!cvq) {
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
rte_spinlock_lock(&cvq->lock);
- vq = cvq->vq;
+ vq = virtnet_cq_to_vq(cvq);
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
vq->sw_ring = sw_ring;
rxvq = &vq->rxq;
- rxvq->vq = vq;
rxvq->port_id = dev->data->port_id;
rxvq->mz = mz;
} else if (queue_type == VTNET_TQ) {
txvq = &vq->txq;
- txvq->vq = vq;
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
- cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
cvq->virtio_net_hdr_mem = hdr_mz->iova;
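The back-pointer assignments removed above (rxvq->vq, txvq->vq, cvq->vq) are no longer needed because the per-queue metadata is embedded directly in struct virtqueue, and the conversion macros recover the parent by member offset. An abridged sketch of the layout this relies on; the union and its placement are an assumption inferred from the container_of() member names rxq/txq/cq, as the hunk adding them is not shown here:

/* Assumed layout (abridged): the member names rxq/txq/cq must match
 * the container_of() calls in the virtnet_*_to_vq() macros. A queue
 * is only ever one of the three kinds, hence a union. */
struct virtqueue {
	struct virtio_hw *hw;
	/* ... ring state, indexes, memzone bookkeeping ... */
	union {
		struct virtnet_rx rxq;	/* VTNET_RQ */
		struct virtnet_tx txq;	/* VTNET_TQ */
		struct virtnet_ctl cq;	/* VTNET_CQ */
	};
	/* ... */
};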
{
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
virtqueue_enable_intr(vq);
virtio_mb(hw->weak_barriers);
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
virtqueue_disable_intr(vq);
return 0;
virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
- struct virtnet_rx *rxvq;
- struct virtnet_tx *txvq __rte_unused;
+ struct virtqueue *vq;
struct virtio_hw *hw = dev->data->dev_private;
int ret;
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxvq = dev->data->rx_queues[i];
+ vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
/* Flush the old packets */
- virtqueue_rxvq_flush(rxvq->vq);
- virtqueue_notify(rxvq->vq);
+ virtqueue_rxvq_flush(vq);
+ virtqueue_notify(vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txvq = dev->data->tx_queues[i];
- virtqueue_notify(txvq->vq);
+ vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+ virtqueue_notify(vq);
}
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxvq = dev->data->rx_queues[i];
- VIRTQUEUE_DUMP(rxvq->vq);
+ vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txvq = dev->data->tx_queues[i];
- VIRTQUEUE_DUMP(txvq->vq);
+ vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
set_rxtx_funcs(dev);
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
struct virtnet_rx *rxvq = rxq;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
return virtqueue_nused(vq) >= offset;
}
uint16_t num)
{
struct vq_desc_extra *dxp;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct vring_desc *start_dp;
struct virtio_net_hdr *hdr;
uint16_t idx;
struct rte_mbuf *cookie,
int in_order)
{
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct vring_packed_desc *dp;
struct vq_desc_extra *dxp;
uint16_t idx, id, flags;
{
struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct vring_desc *start_dp;
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
{
struct virtio_hw *hw = dev->data->dev_private;
- if (hw->cvq && hw->cvq->vq) {
+ if (hw->cvq) {
rte_spinlock_init(&hw->cvq->lock);
- VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
+ VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
}
}
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
uint16_t nb_used, num, nb_rx;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
uint16_t num, nb_rx;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
struct rte_mbuf *prev = NULL;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
struct rte_mbuf *prev = NULL;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm;
struct rte_mbuf *prev = NULL;
uint16_t nb_pkts)
{
struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx = 0;
uint16_t nb_pkts)
{
struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
};
struct virtnet_rx {
- struct virtqueue *vq;
/* dummy mbuf, for wraparound when processing RX ring. */
struct rte_mbuf fake_mbuf;
uint64_t mbuf_initializer; /**< value to init mbufs. */
};
struct virtnet_tx {
- struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
};
struct virtnet_ctl {
- struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
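With the vq members gone, these three structures are only meaningful when embedded in a struct virtqueue: the conversion macros do unchecked pointer arithmetic, so applying one to a standalone instance compiles but yields a pointer into unowned memory. A hypothetical misuse, for illustration only:

/* Hypothetical misuse: 'standalone' is not inside any virtqueue, so
 * the computed parent pointer is garbage. Never do this. */
struct virtnet_rx standalone;
struct virtqueue *vq = virtnet_rxq_to_vq(&standalone);	/* wild pointer */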
uint16_t nb_pkts)
{
struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct virtio_hw *hw = vq->hw;
uint16_t nb_tx = 0;
uint16_t remained;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t num, nb_rx = 0;
uint32_t nb_enqueued = 0;
virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
struct rte_mbuf *txm)
{
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t slots, can_push = 0, use_indirect = 0;
{
uint16_t used_idx, id;
uint32_t len;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint32_t hdr_size = hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
struct rte_mbuf **cookie,
uint16_t num)
{
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
struct rte_mbuf **tx_pkts)
{
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
uint16_t head_size = vq->hw->vtnet_hdr_size;
uint16_t idx = vq->vq_avail_idx;
struct virtio_net_hdr *hdr;
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
struct rte_mbuf **rx_pkts)
{
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint64_t addrs[PACKED_BATCH_SIZE];
virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
struct rte_mbuf **tx_pkts)
{
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
uint16_t head_size = vq->hw->vtnet_hdr_size;
uint16_t idx = vq->vq_avail_idx;
struct virtio_net_hdr *hdr;
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
struct rte_mbuf **rx_pkts)
{
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t head_size = hw->vtnet_hdr_size;
uint16_t id = vq->vq_used_cons_idx;
struct rte_mbuf **sw_ring;
struct vring_desc *start_dp;
int ret;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
sw_ring = &vq->sw_ring[desc_idx];
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t nb_used, nb_total;
uint16_t desc_idx;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t nb_used, nb_total;
uint16_t desc_idx;
uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t nb_used, nb_total;
uint16_t desc_idx;
/* Vring reset for each Tx queue and Rx queue. */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxvq = eth_dev->data->rx_queues[i];
- virtqueue_rxvq_reset_packed(rxvq->vq);
+ virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
virtio_dev_rx_queue_setup_finish(eth_dev, i);
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txvq = eth_dev->data->tx_queues[i];
- virtqueue_txvq_reset_packed(txvq->vq);
+ virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
}
hw->started = 1;
uint64_t buf = 1;
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- if (hw->cvq && (hw->cvq->vq == vq)) {
+ if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
if (virtio_with_packed_queue(vq->hw))
virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
else
uint16_t next;
};
+#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
+#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
+#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
+
struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
union {
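For readers unfamiliar with the idiom, here is a standalone demonstration of what the three macros above do. The toy types are invented for illustration; DPDK's real container_of() (rte_common.h) is functionally equivalent to the offsetof() form used here, with additional type checking:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic as DPDK's container_of(): step back from a member's
 * address to the address of its enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	int id;
	struct inner in;	/* embedded, like rxq/txq/cq in struct virtqueue */
};

int main(void)
{
	struct outer o = { .id = 42, .in = { .x = 7 } };
	struct inner *ip = &o.in;

	/* Recover &o from ip, exactly as virtnet_rxq_to_vq() recovers
	 * the virtqueue from an embedded virtnet_rx. */
	struct outer *op = container_of(ip, struct outer, in);

	printf("id=%d x=%d\n", op->id, op->in.x);	/* id=42 x=7 */
	return 0;
}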
{
struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct vring_packed_desc *start_dp, *head_dp;
uint16_t idx, id, head_idx, head_flags;
int16_t head_size = vq->hw->vtnet_hdr_size;