struct vring_desc *start_dp;
struct virtio_net_hdr *hdr;
uint16_t idx;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
uint16_t i = 0;
idx = vq->vq_desc_head_idx;
dxp->ndescs = 1;
virtio_update_packet_stats(&txvq->stats, cookies[i]);
- hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
- cookies[i]->data_off - head_size;
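+ /* The virtio header lives in the mbuf headroom, head_size bytes
+ * before the packet data, hence the negative mtod offset. */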
+ hdr = rte_pktmbuf_mtod_offset(cookies[i],
+ struct virtio_net_hdr *, -head_size);
/* if offload disabled, hdr is not zeroed yet, do it now */
if (!vq->hw->has_tx_offload)
struct vring_packed_desc *dp;
struct vq_desc_extra *dxp;
uint16_t idx, id, flags;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
flags = vq->vq_packed.cached_flags;
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
- cookie->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+ -head_size);
/* if offload disabled, hdr is not zeroed yet, do it now */
if (!vq->hw->has_tx_offload)
struct virtqueue *vq = txvq->vq;
struct vring_packed_desc *start_dp, *head_dp;
uint16_t idx, id, head_idx, head_flags;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
uint16_t prev;
bool prepend_header = false;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
- cookie->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+ -head_size);
prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
struct vring_desc *start_dp;
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
- uint16_t head_size = vq->hw->vtnet_hdr_size;
+ int16_t head_size = vq->hw->vtnet_hdr_size;
bool prepend_header = false;
struct virtio_net_hdr *hdr;
if (can_push) {
/* prepend cannot fail, checked by caller */
- hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
- cookie->data_off - head_size;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+ -head_size);
prepend_header = true;
/* if offload disabled, it is not zeroed below, do it now */
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
struct virtnet_rx *rxvq;
+ uint16_t rx_free_thresh;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
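+ /* Default to a quarter of the ring, capped at DEFAULT_RX_FREE_THRESH,
+ * when the user passes no threshold, then validate the result. */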
+ rx_free_thresh = rx_conf->rx_free_thresh;
+ if (rx_free_thresh == 0)
+ rx_free_thresh =
+ RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
+
+ if (rx_free_thresh & 0x3) {
+ RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four."
+ " (rx_free_thresh=%u port=%u queue=%u)\n",
+ rx_free_thresh, dev->data->port_id, queue_idx);
+ return -EINVAL;
+ }
+
+ if (rx_free_thresh >= vq->vq_nentries) {
+ RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
+ "number of RX entries (%u)."
+ " (rx_free_thresh=%u port=%u queue=%u)\n",
+ vq->vq_nentries,
+ rx_free_thresh, dev->data->port_id, queue_idx);
+ return -EINVAL;
+ }
+ vq->vq_free_thresh = rx_free_thresh;
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
struct rte_mbuf *m;
uint16_t desc_idx;
int error, nbufs, i;
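+ /* Derive the datapath from the negotiated VIRTIO_F_IN_ORDER feature
+ * rather than a cached per-device flag. */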
+ bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
PMD_INIT_FUNC_TRACE();
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
- if (hw->use_simple_rx) {
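+ /* Vectorized Rx works on split rings only; seed the avail ring
+ * with the identity mapping the vector path expects. */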
+ if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
&rxvq->fake_mbuf;
}
- if (hw->use_simple_rx) {
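+ /* Rearm in RTE_VIRTIO_VPMD_RX_REARM_THRESH-sized batches for the
+ * vector path. */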
+ if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
}
- } else if (hw->use_inorder_rx) {
+ } else if (!vtpci_packed_queue(hw) && in_order) {
if ((!virtqueue_full(vq))) {
uint16_t free_cnt = vq->vq_free_cnt;
struct rte_mbuf *pkts[free_cnt];
PMD_INIT_FUNC_TRACE();
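+ /* Deferred start of Tx queues is not supported by this driver. */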
+ if (tx_conf->tx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
+ return -EINVAL;
+ }
+
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
if (tx_free_thresh >= (vq->vq_nentries - 3)) {
- RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+ PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX entries minus 3 (%u)."
" (tx_free_thresh=%u port=%u queue=%u)\n",
vq->vq_nentries - 3,
PMD_INIT_FUNC_TRACE();
if (!vtpci_packed_queue(hw)) {
- if (hw->use_inorder_tx)
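+ /* For in-order split rings, point the last descriptor's next field
+ * back to entry 0 so the chain wraps instead of terminating. */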
+ if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
}
error = virtqueue_enqueue_recv_refill(vq, &m, 1);
if (unlikely(error)) {
- RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
rte_pktmbuf_free(m);
}
}
error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
if (unlikely(error)) {
- RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
rte_pktmbuf_free(m);
}
}
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
- bool in_order = hw->use_inorder_tx;
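+ /* In-order Tx is likewise keyed off the negotiated feature bit. */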
+ bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;