X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_virtio%2Fvirtqueue.h;h=48b5092c94c8c288acd1117f67ca76a4b2c57e73;hb=3031749c2df04a63cdcef186dcce3781e61436e8;hp=b67c223d8332d1eb106a27ea172cc482bb64c786;hpb=b9a4361fc526a76d876ceb43a6ee6539038239ab;p=dpdk.git

diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index b67c223d83..48b5092c94 100644
--- a/lib/librte_pmd_virtio/virtqueue.h
+++ b/lib/librte_pmd_virtio/virtqueue.h
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -88,7 +88,7 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 #define VIRTIO_NET_CTRL_RX_ALLUNI 2
 #define VIRTIO_NET_CTRL_RX_NOMULTI 3
 #define VIRTIO_NET_CTRL_RX_NOUNI 4
-#define VIRTIO_NET_CTRL_RX_NOBCAST 5 
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
 
 /**
  * Control VLAN filtering
@@ -103,6 +103,24 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 #define VIRTIO_NET_CTRL_VLAN_ADD 0
 #define VIRTIO_NET_CTRL_VLAN_DEL 1
 
+struct virtio_net_ctrl_hdr {
+	uint8_t class;
+	uint8_t cmd;
+} __attribute__((packed));
+
+typedef uint8_t virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK  0
+#define VIRTIO_NET_ERR 1
+
+#define VIRTIO_MAX_CTRL_DATA 128
+
+struct virtio_pmd_ctrl {
+	struct virtio_net_ctrl_hdr hdr;
+	virtio_net_ctrl_ack status;
+	uint8_t data[VIRTIO_MAX_CTRL_DATA];
+};
+
 struct virtqueue {
 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
 	struct virtio_hw *hw; /**< virtio_hw structure pointer. */
@@ -142,6 +160,16 @@ struct virtqueue {
 	} vq_descx[0];
 };
 
+/* If multiqueue is provided by the host, then we support it. */
+#ifndef VIRTIO_NET_F_MQ
+/* Device supports Receive Flow Steering */
+#define VIRTIO_NET_F_MQ 0x400000
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+#endif
+
 /**
  * This is the first element of the scatter-gather list. If you don't
  * specify GSO or CSUM features, you can simply ignore the header.
@@ -204,9 +232,10 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 	uint16_t avail_idx;
 	/*
 	 * Place the head of the descriptor chain into the next slot and make
-	 * it usable to the host. We wait to inform the host until after the burst
-	 * is complete to avoid cache alignment issues with descriptors. This
-	 * also helps to avoid any contention on the available index.
+	 * it usable to the host. The chain is made available now rather than
+	 * deferring to virtqueue_notify() in the hopes that if the host is
+	 * currently running on another CPU, we can keep it processing the new
+	 * descriptor.
 	 */
 	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
 	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
@@ -222,7 +251,7 @@ virtqueue_kick_prepare(struct virtqueue * vq)
 static inline void __attribute__((always_inline))
 virtqueue_notify(struct virtqueue *vq)
 {
-	/* 
+	/*
 	 * Ensure updated avail->idx is visible to host. mb() necessary?
 	 * For virtio on IA, the notification is through io port operation
 	 * which is a serialization instruction itself.
@@ -242,12 +271,12 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
 		while (dp->flags & VRING_DESC_F_NEXT) {
-			desc_idx_last = dp->next; 
+			desc_idx_last = dp->next;
 			dp = &vq->vq_ring.desc[dp->next];
 		}
 	}
 	dxp->ndescs = 0;
-	
+
 	/*
 	 * We must append the existing free chain, if any, to the end of
 	 * newly freed chain. If the virtqueue was completely used, then
@@ -259,6 +288,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
 		dp_tail->next = desc_idx;
 	}
+
 	vq->vq_desc_tail_idx = desc_idx_last;
 	dp->next = VQ_RING_DESC_CHAIN_END;
 }
@@ -294,7 +324,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 	idx = start_dp[idx].next;
 	vq->vq_desc_head_idx = idx;
 	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-		vq->vq_desc_tail_idx = idx; 
+		vq->vq_desc_tail_idx = idx;
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 	vq_update_avail_ring(vq, head_idx);
@@ -314,7 +344,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	if (unlikely(txvq->vq_free_cnt < needed))
 		return (-EMSGSIZE);
 	head_idx = txvq->vq_desc_head_idx;
-	if (unlikely(head_idx >= txvq->vq_nentries)) 
+	if (unlikely(head_idx >= txvq->vq_nentries))
 		return (-EFAULT);
 
 	idx = head_idx;
@@ -335,7 +365,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	idx = start_dp[idx].next;
 	txvq->vq_desc_head_idx = idx;
 	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-		txvq->vq_desc_tail_idx = idx; 
+		txvq->vq_desc_tail_idx = idx;
 	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
 	vq_update_avail_ring(txvq, head_idx);
@@ -357,11 +387,13 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint
 		desc_idx = (uint16_t) uep->id;
 		len[i] = uep->len;
 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
 		if (unlikely(cookie == NULL)) {
-			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n", 
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
 				vq->vq_used_cons_idx);
 			break;
 		}
+
 		rte_prefetch0(cookie);
 		rte_packet_prefetch(cookie->pkt.data);
 		rx_pkts[i] = cookie;
@@ -369,22 +401,23 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint
 		vq_ring_free_chain(vq, desc_idx);
 		vq->vq_descx[desc_idx].cookie = NULL;
 	}
+
 	return (i);
 }
 
 static inline uint16_t __attribute__((always_inline))
 virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
 {
-	struct vring_used_elem *uep;
-	uint16_t used_idx, desc_idx;
+	struct vring_used_elem *uep;
+	uint16_t used_idx, desc_idx;
 
-	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-	uep = &vq->vq_ring.used->ring[used_idx];
-	desc_idx = (uint16_t) uep->id;
-	vq->vq_used_cons_idx++;
-	vq_ring_free_chain(vq, desc_idx);
+	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+	uep = &vq->vq_ring.used->ring[used_idx];
+	desc_idx = (uint16_t) uep->id;
+	vq->vq_used_cons_idx++;
+	vq_ring_free_chain(vq, desc_idx);
 
-	return 0;
+	return 0;
 }
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
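
The control-queue layout this patch introduces (struct virtio_net_ctrl_hdr, the
virtio_net_ctrl_ack status byte, and struct virtio_pmd_ctrl) pairs with the new
VIRTIO_NET_CTRL_MQ constants roughly as follows. This is a minimal sketch, not the
PMD's actual code: virtio_send_command() and virtio_set_nb_queue_pairs() are
hypothetical names invented for the example (the real command-submission helper is
not part of this header), and struct virtio_net_ctrl_mq is the payload layout given
by the virtio specification rather than anything defined in this diff.

/* Illustrative sketch only -- not part of this patch. */
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include "virtqueue.h" /* struct virtqueue, struct virtio_pmd_ctrl, CTRL_MQ defines */

/* Payload for VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, as laid out in the virtio spec. */
struct virtio_net_ctrl_mq {
	uint16_t virtqueue_pairs;
} __attribute__((packed));

/*
 * Hypothetical helper: enqueue hdr + data on the control virtqueue, notify the
 * host, wait for completion and return the virtio_net_ctrl_ack status byte.
 */
int virtio_send_command(struct virtqueue *cvq, struct virtio_pmd_ctrl *ctrl,
		int dlen);

static int
virtio_set_nb_queue_pairs(struct virtqueue *cvq, uint16_t nb_pairs)
{
	struct virtio_pmd_ctrl ctrl;
	struct virtio_net_ctrl_mq mq;

	/* The spec bounds the request to [VQ_PAIRS_MIN, VQ_PAIRS_MAX]. */
	if (nb_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    nb_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
		return -EINVAL;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;            /* class 4, defined above */
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; /* command 0, defined above */

	mq.virtqueue_pairs = nb_pairs;
	memcpy(ctrl.data, &mq, sizeof(mq)); /* fits easily in VIRTIO_MAX_CTRL_DATA */

	return (virtio_send_command(cvq, &ctrl, (int)sizeof(mq)) == VIRTIO_NET_OK)
		? 0 : -EIO;
}

The device writes VIRTIO_NET_OK or VIRTIO_NET_ERR back for each command, which is
why struct virtio_pmd_ctrl carries a separate status field alongside its 128-byte
data area.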