struct vring_desc *start_dp;
struct virtio_net_hdr *hdr;
uint16_t idx;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
uint16_t i = 0;
idx = vq->vq_desc_head_idx;
dxp->ndescs = 1;
virtio_update_packet_stats(&txvq->stats, cookies[i]);
- hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
- cookies[i]->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookies[i],
+ struct virtio_net_hdr *, -head_size);
/* if offload disabled, hdr is not zeroed yet, do it now */
if (!vq->hw->has_tx_offload)
virtqueue_clear_net_hdr(hdr);
else
virtqueue_xmit_offload(hdr, cookies[i], true);
- start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
+ start_dp[idx].addr =
+ VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
start_dp[idx].len = cookies[i]->data_len + head_size;
start_dp[idx].flags = 0;
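
A note on the header-pointer rewrite above: rte_pktmbuf_mtod_offset(m, t, o) resolves to byte arithmetic, roughly (t)((char *)(m)->buf_addr + (m)->data_off + (o)), with the cast applied last. In the removed open-coded form the casts bind tighter than '+', so as literally written the addition steps in sizeof(struct virtio_net_hdr) units rather than bytes. Making head_size an int16_t also keeps the negated offset explicitly signed (the unsigned variant only behaves through integer promotion). A standalone sketch of the precedence difference, using stand-in types rather than driver code:

    #include <stdint.h>
    #include <stdio.h>

    struct hdr12 { uint8_t b[12]; }; /* stand-in for struct virtio_net_hdr */

    int main(void)
    {
        static char buf[4096];
        uint16_t data_off = 128;
        int16_t head_size = 12;

        /* Casts bind tighter than '+': this steps in 12-byte units. */
        struct hdr12 *wrong = (struct hdr12 *)(char *)buf + data_off - head_size;

        /* Byte arithmetic first, cast last; what the mtod_offset form does. */
        struct hdr12 *right = (struct hdr12 *)(buf + data_off - head_size);

        printf("wrong=+%td right=+%td\n",
               (char *)wrong - buf, (char *)right - buf); /* +1392 vs +116 */
        return 0;
    }
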
struct vring_packed_desc *dp;
struct vq_desc_extra *dxp;
uint16_t idx, id, flags;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
flags = vq->vq_packed.cached_flags;
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
- cookie->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+ -head_size);
/* if offload disabled, hdr is not zeroed yet, do it now */
if (!vq->hw->has_tx_offload)
virtqueue_clear_net_hdr(hdr);
else
virtqueue_xmit_offload(hdr, cookie, true);
- dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
dp->len = cookie->data_len + head_size;
dp->id = id;
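
The addr/len changes in the two hunks above encode where the header now lives: it is built in the mbuf headroom directly before the packet data, without moving data_off, so the single descriptor has to start head_size bytes before the data and grow by head_size bytes. A minimal, self-contained check of that layout arithmetic (mbuf_sketch is a stand-in for struct rte_mbuf; the real driver obtains an IOVA via VIRTIO_MBUF_DATA_DMA_ADDR):

    #include <assert.h>
    #include <stdint.h>

    struct mbuf_sketch {       /* minimal stand-in for struct rte_mbuf */
        char *buf_addr;
        uint16_t data_off;
        uint16_t data_len;
    };

    int main(void)
    {
        static char backing[2048];
        struct mbuf_sketch m = { backing, 128, 60 }; /* 128B headroom, 60B frame */
        int16_t head_size = 12; /* e.g. the mergeable-rxbuf header size */

        /* Header lands in the headroom, right before the frame. */
        char *hdr = m.buf_addr + m.data_off - head_size;

        /* One descriptor spans header + frame contiguously. */
        char *desc_addr = (m.buf_addr + m.data_off) - head_size;
        uint32_t desc_len = (uint32_t)m.data_len + head_size;

        assert(desc_addr == hdr);
        assert(desc_addr + desc_len == m.buf_addr + m.data_off + m.data_len);
        return 0;
    }
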
struct virtqueue *vq = txvq->vq;
struct vring_packed_desc *start_dp, *head_dp;
uint16_t idx, id, head_idx, head_flags;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
uint16_t prev;
bool prepend_header = false;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
- cookie->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+ -head_size);
prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
if (!vq->hw->has_tx_offload)
virtqueue_clear_net_hdr(hdr);
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
+ start_dp[idx].addr -= head_size;
start_dp[idx].len += head_size;
prepend_header = false;
}
struct vring_desc *start_dp;
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
bool prepend_header = false;
struct virtio_net_hdr *hdr;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
- cookie->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+ -head_size);
prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
if (!vq->hw->has_tx_offload)
virtqueue_clear_net_hdr(hdr);
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
+ start_dp[idx].addr -= head_size;
start_dp[idx].len += head_size;
prepend_header = false;
}
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
PMD_INIT_FUNC_TRACE();
+ if (rx_conf->rx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
+ return -EINVAL;
+ }
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
PMD_INIT_FUNC_TRACE();
+ if (tx_conf->tx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
+ return -EINVAL;
+ }
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
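
With the two checks above, a deferred-start request now fails at queue setup instead of being silently ignored (previously rx_conf/tx_conf were marked __rte_unused, so the flag had no effect and the queues started with the port anyway). A hedged application-side sketch of what the change means, assuming a configured port, an existing mempool, and a recent DPDK where rte_eth_dev_info_get() returns an error code:

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Illustrative only: shows the flag that virtio now rejects. */
    static int
    try_deferred_rxq(uint16_t port_id, struct rte_mempool *mb_pool)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rx_conf;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
            return -1;

        rx_conf = dev_info.default_rxconf;
        rx_conf.rx_deferred_start = 1; /* queue must be started explicitly */

        /* With the patch above the virtio PMD returns -EINVAL here. */
        return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                &rx_conf, mb_pool);
    }
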