txr = hdr_mz->addr;
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
- struct vring_desc *start_dp = txr[i].tx_indir;
-
/* first indirect descriptor is always the tx header */
if (!vtpci_packed_queue(hw)) {
+ struct vring_desc *start_dp = txr[i].tx_indir;
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
start_dp->addr = txvq->virtio_net_hdr_mem
	+ i * sizeof(*txr)
	+ offsetof(struct virtio_tx_region,
		tx_hdr);
start_dp->len = hw->vtnet_hdr_size;
start_dp->flags = VRING_DESC_F_NEXT;
+ } else {
+ struct vring_packed_desc *start_dp =
+ txr[i].tx_packed_indir;
+ vring_desc_init_indirect_packed(start_dp,
+ RTE_DIM(txr[i].tx_packed_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
}
}
}
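
The stride arithmetic above can be checked in isolation: each transmit slot owns one struct virtio_tx_region inside the header memzone, so slot i's header sits at base + i * sizeof(region) + offsetof(region, tx_hdr). The standalone sketch below uses trimmed stand-in types (tx_region_stub, tx_hdr_iova) purely for illustration; it is not part of the patch.

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

/* Trimmed stand-ins; the real driver uses struct virtio_tx_region with a
 * virtio_net_hdr_mrg_rxbuf header. Sizes here are illustrative only. */
struct tx_hdr_stub { uint8_t bytes[12]; };
struct tx_region_stub {
	struct tx_hdr_stub tx_hdr;
	uint8_t indir_table[16 * 8];	/* placeholder for the descriptor union */
};

/* Address of slot i's tx header, mirroring the computation above. */
static uint64_t
tx_hdr_iova(uint64_t hdr_mem_base, unsigned int slot)
{
	return hdr_mem_base + slot * sizeof(struct tx_region_stub)
		+ offsetof(struct tx_region_stub, tx_hdr);
}

int
main(void)
{
	/* 0x100000 is an arbitrary example base address. */
	printf("slot 3 header at 0x%" PRIx64 "\n", tx_hdr_iova(0x100000, 3));
	return 0;
}
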
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
- __rte_aligned(16);
+ union {
+ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
+ struct vring_packed_desc
+ tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
+ } __rte_aligned(16);
};
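
One point worth noting about the union above (a sketch, not part of the patch): the split and packed descriptor layouts are both 16 bytes, so overlaying them leaves sizeof(struct virtio_tx_region), and therefore the i * sizeof(*txr) stride used earlier, unchanged. The stand-in structs below mirror the virtio spec layouts only to make that check self-contained.

#include <stdint.h>

/* Stand-ins mirroring the virtio spec descriptor layouts (16 bytes each). */
struct vring_desc_stub {		/* split ring */
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

struct vring_packed_desc_stub {		/* packed ring */
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Overlaying the two tables in a union must not change the region stride. */
_Static_assert(sizeof(struct vring_desc_stub) ==
	       sizeof(struct vring_packed_desc_stub),
	       "descriptor layouts must have identical size");
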
static inline void
vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		dp[i].next = (uint16_t)(i + 1);
		dp[i].flags = VRING_DESC_F_NEXT;
	}
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}
+static inline void
+vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ dp[i].id = (uint16_t)i;
+ dp[i].flags = VRING_DESC_F_WRITE;
+ }
+}
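
A minimal usage sketch of the new helper (the stand-in descriptor type and the stub value for VRING_DESC_F_WRITE are assumptions made so the snippet compiles on its own): after initialization every slot of the indirect table carries its own id and the write flag, while addr and len are filled in per packet at transmit time.

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_WRITE_STUB 2	/* value of VRING_DESC_F_WRITE in the spec */

struct vring_packed_desc_stub {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Mirror of vring_desc_init_indirect_packed() for the stub type. */
static void
init_indirect_packed_stub(struct vring_packed_desc_stub *dp, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		dp[i].id = (uint16_t)i;
		dp[i].flags = VRING_DESC_F_WRITE_STUB;
	}
}

int
main(void)
{
	struct vring_packed_desc_stub table[8] = { 0 };

	init_indirect_packed_stub(table, 8);
	printf("desc 5: id=%u flags=%u\n",
	       (unsigned int)table[5].id, (unsigned int)table[5].flags);
	return 0;
}
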
+
/**
* Tell the backend not to interrupt us. Implementation for packed virtqueues.
*/