#define VIRTQUEUE_MAX_NAME_SZ 32
+/**
+ * Return the IOVA (or virtual address, in the case of virtio-user) of
+ * the mbuf data buffer.
+ *
+ * The address is first cast to the word size (sizeof(uintptr_t))
+ * before being cast to uint64_t. This makes the macro work with any
+ * combination of word size (64-bit or 32-bit) and virtio device
+ * (virtio-pci or virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+ ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
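To see what the double cast buys, here is a standalone toy; the toy_mbuf struct is invented for illustration and is not the real rte_mbuf. Reading through uintptr_t loads exactly one native word, so a 32-bit build never reads 8 bytes from a 4-byte pointer field.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for rte_mbuf: only the address field matters here. */
struct toy_mbuf {
	void *buf_addr;
};

int main(void)
{
	struct toy_mbuf mb;
	uintptr_t off = offsetof(struct toy_mbuf, buf_addr);

	mb.buf_addr = &mb;	/* any pointer value will do */

	/* Same pattern as VIRTIO_MBUF_ADDR: read exactly one native
	 * word (4 bytes on 32-bit, 8 on 64-bit), then widen to the
	 * fixed 64-bit type the descriptor expects.
	 */
	printf("addr = 0x%" PRIx64 "\n",
	       (uint64_t)(*(uintptr_t *)((uintptr_t)&mb + off)));
	return 0;
}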
+/**
+ * Return the physical address (or virtual address, in the case of
+ * virtio-user) of the mbuf data buffer, accounting for the mbuf data
+ * offset.
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+ (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
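For context, a hedged sketch of how mbuf_addr_offset could be initialized at queue setup; the use_va flag stands in for however the driver distinguishes virtio-user (virtual addressing) from virtio-pci (IOVA) and is an assumption here:

/* Sketch: choose which rte_mbuf field VIRTIO_MBUF_ADDR() will
 * dereference. Selecting the offset once at setup keeps the
 * per-packet macro branch-free.
 */
if (use_va)	/* assumed flag: device consumes virtual addresses */
	vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
else
	vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);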
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2
void *vq_ring_virt_mem; /**< linear address of vring */
unsigned int vq_ring_size;
+ uint16_t mbuf_addr_offset; /**< mbuf address field offset read by VIRTIO_MBUF_ADDR() */
union {
struct virtnet_rx rxq;
virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
{
uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+ /* Outer L2 + L3 header length, zero for non-tunnelled packets. */
+ uint16_t o_l23_len = (cookie->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ cookie->outer_l2_len + cookie->outer_l3_len : 0;
if (cookie->ol_flags & PKT_TX_TCP_SEG)
csum_l4 |= PKT_TX_TCP_CKSUM;
switch (csum_l4) {
case PKT_TX_UDP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
case PKT_TX_TCP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
hdr->gso_size = cookie->tso_segsz;
- hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+ hdr->hdr_len = o_l23_len + cookie->l2_len + cookie->l3_len +
+ cookie->l4_len;
} else {
ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
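A standalone sketch of what folding o_l23_len in changes; the header lengths below are assumed example values, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example: outer Ethernet (14) + outer IPv4 (20); the
	 * inner l2_len/l3_len values are equally illustrative.
	 */
	uint16_t outer_l2_len = 14, outer_l3_len = 20;
	uint16_t l2_len = 30, l3_len = 20;
	int is_tunnel = 1;

	uint16_t o_l23_len = is_tunnel ?
		(uint16_t)(outer_l2_len + outer_l3_len) : 0;

	/* Without the outer lengths, csum_start lands 34 bytes too
	 * early and the device checksums from inside the tunnel
	 * headers instead of from the inner L4 header.
	 */
	printf("old csum_start = %u\n", (unsigned)(l2_len + l3_len));
	printf("new csum_start = %u\n",
	       (unsigned)(o_l23_len + l2_len + l3_len));
	return 0;
}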
RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
start_dp[idx].len = (seg_num + 1) *
sizeof(struct vring_packed_desc);
+ /* Restore the packed descriptor id when in-order completion is used. */
+ if (in_order)
+ start_dp[idx].id = idx;
/* reset flags for indirect desc */
head_flags = VRING_DESC_F_INDIRECT;
head_flags |= vq->vq_packed.cached_flags;
do {
uint16_t flags;
- start_dp[idx].addr = rte_mbuf_data_iova(cookie);
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;
}
static void
-virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
{
uint16_t used_idx, id, curr_id, free_cnt = 0;
uint16_t size = vq->vq_nentries;
struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct vq_desc_extra *dxp;
+ int nb = num; /* signed so the per-chain subtraction below can go negative */
used_idx = vq->vq_used_cons_idx;
/* desc_is_used has a load-acquire or rte_io_rmb inside
* and waits for a used descriptor in the virtqueue.
*/
- while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
+ while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
id = desc[used_idx].id;
do {
curr_id = used_idx;
dxp = &vq->vq_descx[used_idx];
used_idx += dxp->ndescs;
free_cnt += dxp->ndescs;
- num -= dxp->ndescs;
+ nb -= dxp->ndescs;
if (used_idx >= size) {
used_idx -= size;
vq->vq_packed.used_wrap_counter ^= 1;
}
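Why the running count moves into a signed local: a used element completes a whole chain of ndescs descriptors, so the subtraction can overshoot the caller's request. A standalone sketch of the wrap-around an unsigned counter would cause (values assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t num = 1;	/* caller asked to free one descriptor */
	uint16_t ndescs = 4;	/* but the next used chain spans four */

	uint16_t u = num;
	u -= ndescs;		/* wraps to 65533: "u > 0" stays true */
	printf("unsigned counter: %u\n", (unsigned)u);

	int nb = num;
	nb -= ndescs;		/* goes to -3: "nb > 0" ends the loop */
	printf("signed counter:   %d\n", nb);
	return 0;
}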
static void
-virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
{
uint16_t used_idx, id;
uint16_t size = vq->vq_nentries;
/* Cleanup from completed transmits. */
static inline void
-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
+virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
{
if (in_order)
virtio_xmit_cleanup_inorder_packed(vq, num);
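For orientation, a hedged sketch of a call site; the shortfall computation, the slots_needed name, and the stubbed types below are assumptions standing in for the driver internals, not part of the excerpt:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins so the call-site shape compiles on its own; the
 * real struct virtqueue and cleanup live in the patched driver.
 */
struct virtqueue { uint16_t vq_free_cnt; };

static void
virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
{
	(void)vq;
	printf("reclaim %u descs (%s)\n", (unsigned)num,
	       in_order ? "in-order" : "normal");
}

int main(void)
{
	struct virtqueue vq = { .vq_free_cnt = 8 };
	uint16_t slots_needed = 12;	/* assumed demand from a Tx burst */
	int in_order = 1;

	/* Reclaim only the shortfall, passed as uint16_t to match the
	 * new cleanup signatures.
	 */
	if (slots_needed > vq.vq_free_cnt)
		virtio_xmit_cleanup_packed(&vq,
			slots_needed - vq.vq_free_cnt, in_order);
	return 0;
}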