net/octeontx2: add Tx queue rate limit
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e1c0e28..0604105 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -106,7 +106,7 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 	dxp->next = VQ_RING_DESC_CHAIN_END;
 }
 
-static inline void
+void
 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 {
 	uint32_t s = mbuf->pkt_len;
@@ -164,9 +164,11 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 
 	for (i = 0; i < num; i++) {
 		used_idx = vq->vq_used_cons_idx;
+		/* desc_is_used has a load-acquire or rte_cio_rmb inside
+		 * and wait for used desc in virtqueue.
+		 */
 		if (!desc_is_used(&desc[used_idx], vq))
 			return i;
-		virtio_rmb(vq->hw->weak_barriers);
 		len[i] = desc[used_idx].len;
 		id = desc[used_idx].id;
 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
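
The standalone virtio_rmb() disappears from this and the next two hunks because desc_is_used() now embeds the read barrier in its own load of the descriptor flags. A minimal sketch of that idea, assuming the weak_barriers flag and the rte_cio_rmb() helper used elsewhere in this tree (the authoritative definition lives in virtqueue.h):

	static inline int
	desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
	{
		uint16_t flags, used, avail;

		if (vq->hw->weak_barriers)
			/* load-acquire: the later reads of len/id cannot be
			 * reordered before this flags check */
			flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
		else {
			flags = desc->flags;
			rte_cio_rmb();
		}

		used = !!(flags & VRING_PACKED_DESC_F_USED);
		avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

		return avail == used && used == vq->vq_packed.used_wrap_counter;
	}

With the acquire folded into the check itself, the explicit barrier after it would only add cost on weakly ordered CPUs.
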
@@ -275,8 +277,10 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 	struct vq_desc_extra *dxp;
 
 	used_idx = vq->vq_used_cons_idx;
+	/* desc_is_used has a load-acquire or rte_cio_rmb inside
+	 * and wait for used desc in virtqueue.
+	 */
 	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
-		virtio_rmb(vq->hw->weak_barriers);
 		id = desc[used_idx].id;
 		do {
 			curr_id = used_idx;
@@ -307,8 +311,10 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 	struct vq_desc_extra *dxp;
 
 	used_idx = vq->vq_used_cons_idx;
+	/* desc_is_used has a load-acquire or rte_cio_rmb inside
+	 * and wait for used desc in virtqueue.
+	 */
 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
-		virtio_rmb(vq->hw->weak_barriers);
 		id = desc[used_idx].id;
 		dxp = &vq->vq_descx[id];
 		vq->vq_used_cons_idx += dxp->ndescs;
@@ -498,8 +504,10 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		vq->vq_desc_head_idx = dxp->next;
 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
-		virtio_wmb(hw->weak_barriers);
-		start_dp[idx].flags = flags;
+
+		virtqueue_store_flags_packed(&start_dp[idx], flags,
+					     hw->weak_barriers);
+
 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
 			vq->vq_avail_idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
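
virtqueue_store_flags_packed() is the write-side twin: it makes the descriptor's addr/len/id writes globally visible before the flags store that hands the descriptor to the device. A sketch of what such a helper looks like, hedged the same way (see virtqueue.h for the authoritative version):

	static inline void
	virtqueue_store_flags_packed(struct vring_packed_desc *dp,
				     uint16_t flags, uint8_t weak_barriers)
	{
		if (weak_barriers)
			/* store-release: all earlier descriptor writes are
			 * ordered before the flags become visible */
			__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
		else {
			rte_cio_wmb();
			dp->flags = flags;
		}
	}

The same helper replaces the virtio_wmb() plus flags-store pairs in the two Tx hunks further down.
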
@@ -627,7 +635,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 	struct vring_desc *start_dp;
 	struct virtio_net_hdr *hdr;
 	uint16_t idx;
-	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	int16_t head_size = vq->hw->vtnet_hdr_size;
 	uint16_t i = 0;
 
 	idx = vq->vq_desc_head_idx;
@@ -640,9 +648,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		dxp->ndescs = 1;
 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookies[i], head_size);
-		cookies[i]->pkt_len -= head_size;
+		hdr = rte_pktmbuf_mtod_offset(cookies[i],
+				struct virtio_net_hdr *, -head_size);
 
 		/* if offload disabled, hdr is not zeroed yet, do it now */
 		if (!vq->hw->has_tx_offload)
@@ -650,10 +657,12 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		else
 			virtqueue_xmit_offload(hdr, cookies[i], true);
 
-		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
-		start_dp[idx].len = cookies[i]->data_len;
+		start_dp[idx].addr =
+			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+		start_dp[idx].len = cookies[i]->data_len + head_size;
 		start_dp[idx].flags = 0;
+
 		vq_update_avail_ring(vq, idx);
 
 		idx++;
 		i++;
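
The Tx-side theme of this and the following hunks: rte_pktmbuf_prepend() moves data_off and grows both data_len and pkt_len, which the old code then had to patch up with pkt_len -= head_size. The replacement leaves the mbuf untouched and instead points into its headroom, stretching only the descriptor. Roughly (a condensed illustration; desc_addr and desc_len stand in for the start_dp[idx] fields, and VIRTIO_MBUF_DATA_DMA_ADDR is the driver's own address macro):

	/* old: mutate the mbuf, then undo the length accounting */
	hdr = (struct virtio_net_hdr *)rte_pktmbuf_prepend(m, head_size);
	m->pkt_len -= head_size;

	/* new: leave the mbuf alone, address the header in headroom */
	hdr = rte_pktmbuf_mtod_offset(m, struct virtio_net_hdr *, -head_size);
	desc_addr = VIRTIO_MBUF_DATA_DMA_ADDR(m, vq) - head_size;
	desc_len = m->data_len + head_size;

This is also why head_size changes from uint16_t to int16_t throughout: the offset passed to rte_pktmbuf_mtod_offset() is now explicitly signed.
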
@@ -673,7 +682,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	struct vring_packed_desc *dp;
 	struct vq_desc_extra *dxp;
 	uint16_t idx, id, flags;
-	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	int16_t head_size = vq->hw->vtnet_hdr_size;
 	struct virtio_net_hdr *hdr;
 
 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
@@ -687,9 +696,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	flags = vq->vq_packed.cached_flags;
 
 	/* prepend cannot fail, checked by caller */
-	hdr = (struct virtio_net_hdr *)
-		rte_pktmbuf_prepend(cookie, head_size);
-	cookie->pkt_len -= head_size;
+	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+				      -head_size);
 
 	/* if offload disabled, hdr is not zeroed yet, do it now */
 	if (!vq->hw->has_tx_offload)
@@ -697,8 +705,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	else
 		virtqueue_xmit_offload(hdr, cookie, true);
 
-	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
-	dp->len = cookie->data_len;
+	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+	dp->len = cookie->data_len + head_size;
 	dp->id = id;
 
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
@@ -714,8 +722,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
 	}
 
-	virtio_wmb(vq->hw->weak_barriers);
-	dp->flags = flags;
+	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
 }
 
 static inline void
@@ -727,9 +734,10 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	struct virtqueue *vq = txvq->vq;
 	struct vring_packed_desc *start_dp, *head_dp;
 	uint16_t idx, id, head_idx, head_flags;
-	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	int16_t head_size = vq->hw->vtnet_hdr_size;
 	struct virtio_net_hdr *hdr;
 	uint16_t prev;
+	bool prepend_header = false;
 
 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
 
@@ -748,12 +756,9 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	if (can_push) {
 		/* prepend cannot fail, checked by caller */
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookie, head_size);
-		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
-		 * which is wrong. Below subtract restores correct pkt size.
-		 */
-		cookie->pkt_len -= head_size;
+		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+					      -head_size);
+		prepend_header = true;
 
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
@@ -781,6 +786,12 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len = cookie->data_len;
+		if (prepend_header) {
+			start_dp[idx].addr -= head_size;
+			start_dp[idx].len += head_size;
+			prepend_header = false;
+		}
+
 		if (likely(idx != head_idx)) {
 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
 			flags |= vq->vq_packed.cached_flags;
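
In the chained-mbuf paths the descriptors are filled inside a segment loop, so the header adjustment is deferred through the prepend_header flag and applied to the first segment only. The pattern, condensed from this hunk and the matching split-ring hunk below (in the actual code the flag starts false and is set to true inside the can_push branch; this compressed form is only meant to show the first-segment semantics):

	bool prepend_header = can_push;	/* set where the header was written */

	do {
		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len = cookie->data_len;
		if (prepend_header) {
			/* only the first segment carries the virtio-net
			 * header; clear the flag for the rest */
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		}
	} while ((cookie = cookie->next) != NULL);
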
@@ -806,8 +817,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
 	}
 
-	virtio_wmb(vq->hw->weak_barriers);
-	head_dp->flags = head_flags;
+	virtqueue_store_flags_packed(head_dp, head_flags,
+				     vq->hw->weak_barriers);
 }
 
 static inline void
@@ -821,7 +832,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	struct vring_desc *start_dp;
 	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
-	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	int16_t head_size = vq->hw->vtnet_hdr_size;
+	bool prepend_header = false;
 	struct virtio_net_hdr *hdr;
 
 	head_idx = vq->vq_desc_head_idx;
@@ -837,12 +849,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	if (can_push) {
 		/* prepend cannot fail, checked by caller */
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookie, head_size);
-		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
-		 * which is wrong. Below subtract restores correct pkt size.
-		 */
-		cookie->pkt_len -= head_size;
+		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+					      -head_size);
+		prepend_header = true;
 
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
@@ -881,6 +890,11 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	do {
 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len = cookie->data_len;
+		if (prepend_header) {
+			start_dp[idx].addr -= head_size;
+			start_dp[idx].len += head_size;
+			prepend_header = false;
+		}
 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
 		idx = start_dp[idx].next;
 	} while ((cookie = cookie->next) != NULL);
@@ -915,7 +929,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
 			unsigned int socket_id __rte_unused,
-			const struct rte_eth_rxconf *rx_conf __rte_unused,
+			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
 {
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
@@ -925,6 +939,11 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rx_conf->rx_deferred_start) {
+		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
+		return -EINVAL;
+	}
+
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
 		nb_desc = vq->vq_nentries;
 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
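
Deferred start is an ethdev opt-in that virtio has never implemented: there is no way to bring a queue up later via rte_eth_dev_rx_queue_start(). Failing the setup call makes that explicit instead of silently ignoring the flag, and it is why rx_conf loses its __rte_unused marker in the hunk above. From the application side (a hypothetical snippet, assuming the usual port_id, nb_rxd, and mbuf_pool setup variables):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;
	rxconf.rx_deferred_start = 1;	/* unsupported by virtio */

	/* now fails with -EINVAL instead of accepting the flag */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				     rte_eth_dev_socket_id(port_id),
				     &rxconf, mbuf_pool);

The Tx hunk below applies the same check to tx_conf->tx_deferred_start.
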
@@ -1048,6 +1067,11 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (tx_conf->tx_deferred_start) {
+		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
+		return -EINVAL;
+	}
+
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
 		nb_desc = vq->vq_nentries;
 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -1061,7 +1085,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
 
 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
-		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
 			"number of TX entries minus 3 (%u)."
 			" (tx_free_thresh=%u port=%u queue=%u)\n",
 			vq->vq_nentries - 3,
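
RTE_LOG(ERR, PMD, ...) goes through the legacy static PMD logtype shared by every driver; PMD_DRV_LOG() routes through virtio's own dynamic logtype, so these messages can be filtered at runtime with a per-driver log level (e.g. --log-level='pmd.net.virtio.*:debug'). The macro in virtio_logs.h is along these lines (sketched from memory; consult the header for the exact definition):

	extern int virtio_logtype_driver;
	#define PMD_DRV_LOG(level, fmt, args...) \
		rte_log(RTE_LOG_ ## level, virtio_logtype_driver, \
			"%s(): " fmt "\n", __func__, ## args)

The two virtio_discard_rxbuf hunks below make the same substitution.
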
@@ -1109,7 +1133,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
 
 	error = virtqueue_enqueue_recv_refill(vq, &m, 1);
 	if (unlikely(error)) {
-		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
 		rte_pktmbuf_free(m);
 	}
 }
@@ -1121,7 +1145,7 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 
 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
 	if (unlikely(error)) {
-		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
 		rte_pktmbuf_free(m);
 	}
 }