#include "virtqueue.h"
#include "virtio_logs.h"
-#include "virtio_pci.h"
+#include "virtio.h"
#include "virtio_rxtx_simple.h"
/*
end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
for (idx = 0; idx < vq->vq_nentries; idx++) {
- if (hw->use_simple_rx && type == VTNET_RQ) {
+ if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
+ type == VTNET_RQ) {
if (start <= end && idx >= start && idx < end)
continue;
if (start > end && (idx >= start || idx < end))
struct vq_desc_extra *dxp;
uint16_t i;
- struct vring_packed_desc *descs = vq->vq_packed.ring.desc_packed;
+ struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
int cnt = 0;
i = vq->vq_used_cons_idx;
uint16_t used_idx, desc_idx;
uint16_t nb_used, i;
- nb_used = VIRTQUEUE_NUSED(vq);
+ nb_used = virtqueue_nused(vq);
for (i = 0; i < nb_used; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
uep = &vq->vq_split.ring.used->ring[used_idx];
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
vq->vq_free_cnt++;
vq->vq_used_cons_idx++;
}
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxq);
if (virtqueue_kick_prepare(vq))
{
struct virtio_hw *hw = vq->hw;
- if (vtpci_packed_queue(hw))
+ if (virtio_with_packed_queue(hw))
virtqueue_rxvq_flush_packed(vq);
else
virtqueue_rxvq_flush_split(vq);
}
+
+/*
+ * Reset a packed-format Rx virtqueue to its freshly-initialized state.
+ *
+ * Rewinds all ring indices and the free-descriptor count, restores the
+ * packed-ring wrap counter / cached flags, zeroes the ring memory, frees
+ * any mbufs still attached to the descriptor-extra array, and
+ * re-initializes the packed descriptors.
+ *
+ * @param vq  Rx virtqueue to reset (must be a packed queue; the embedded
+ *            rxq and its memzone are assumed valid — confirm at callers).
+ * @return    Always 0.
+ */
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ struct vq_desc_extra *dxp;
+ struct virtnet_rx *rxvq;
+ uint16_t desc_idx;
+
+ /* Rewind ring bookkeeping: nothing consumed, everything free. */
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Packed-ring state starts with wrap counter 1 and AVAIL set. */
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+ vq->vq_packed.event_flags_shadow = 0;
+ /* Rx buffers are written by the device, so descriptors carry WRITE. */
+ vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+ /* Clear the whole ring memzone backing this Rx queue. */
+ rxvq = &vq->rxq;
+ memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+ /* Release any mbufs still referenced by the descriptor-extra array. */
+ for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ vring_desc_init_packed(vq, size);
+
+ virtqueue_disable_intr(vq);
+ return 0;
+}
+
+/*
+ * Reset a packed-format Tx virtqueue to its freshly-initialized state.
+ *
+ * Mirrors the Rx reset: rewinds all ring indices and the free count,
+ * restores the packed-ring wrap counter / cached flags, zeroes both the
+ * ring memzone and the virtio-net header memzone, frees any mbufs still
+ * attached to the descriptor-extra array, and re-initializes the packed
+ * descriptors. Unlike the Rx variant, no VRING_DESC_F_WRITE is set:
+ * Tx descriptors are read by the device.
+ *
+ * @param vq  Tx virtqueue to reset (must be a packed queue; the embedded
+ *            txq and its memzones are assumed valid — confirm at callers).
+ * @return    Always 0.
+ */
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ struct vq_desc_extra *dxp;
+ struct virtnet_tx *txvq;
+ uint16_t desc_idx;
+
+ /* Rewind ring bookkeeping: nothing consumed, everything free. */
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Packed-ring state starts with wrap counter 1 and AVAIL set. */
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+ vq->vq_packed.event_flags_shadow = 0;
+
+ /* Clear both the ring and the per-packet virtio-net header region. */
+ txvq = &vq->txq;
+ memset(txvq->mz->addr, 0, txvq->mz->len);
+ memset(txvq->virtio_net_hdr_mz->addr, 0,
+ txvq->virtio_net_hdr_mz->len);
+
+ /* Release any mbufs still referenced by the descriptor-extra array. */
+ for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ vring_desc_init_packed(vq, size);
+
+ virtqueue_disable_intr(vq);
+ return 0;
+}