 		memset(txr, 0, vq_size * sizeof(*txr));
 		for (i = 0; i < vq_size; i++) {
 			struct vring_desc *start_dp = txr[i].tx_indir;
-			struct vring_packed_desc *start_dp_packed =
-				txr[i].tx_indir_pq;
 			/* first indirect descriptor is always the tx header */
-			if (vtpci_packed_queue(hw)) {
-				start_dp_packed->addr = txvq->virtio_net_hdr_mem
-					+ i * sizeof(*txr)
-					+ offsetof(struct virtio_tx_region,
-						   tx_hdr);
-				start_dp_packed->len = hw->vtnet_hdr_size;
-			} else {
+			if (!vtpci_packed_queue(hw)) {
 				vring_desc_init_split(start_dp,
 						      RTE_DIM(txr[i].tx_indir));
 				start_dp->addr = txvq->virtio_net_hdr_mem
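As an aside, the split-ring path kept above assumes that each Tx slot i owns one struct virtio_tx_region inside the contiguous header memzone, so the header backing slot i sits at virtio_net_hdr_mem + i * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr). A minimal standalone sketch of that address arithmetic, using simplified stand-in types rather than the driver's definitions:

#include <stddef.h>
#include <stdint.h>

/* Stand-in for struct virtio_tx_region: a Tx header followed by the
 * per-slot indirect descriptor table (member sizes are illustrative only).
 */
struct tx_region_sketch {
	char tx_hdr[12];
	char tx_indir[8][16];
};

/* Address of the Tx header backing slot i, mirroring the
 * "hdr_mem + i * sizeof(*txr) + offsetof(..., tx_hdr)" pattern above.
 */
static inline uint64_t
slot_hdr_addr(uint64_t hdr_mem, uint16_t i)
{
	return hdr_mem + (uint64_t)i * sizeof(struct tx_region_sketch)
		+ offsetof(struct tx_region_sketch, tx_hdr);
}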
 #define VIRTIO_MAX_TX_INDIRECT 8
 struct virtio_tx_region {
 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
-	union {
-		struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-			__attribute__((__aligned__(16)));
-		struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
-			__attribute__((__aligned__(16)));
-	};
+	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+		__attribute__((__aligned__(16)));
 };
 static inline int