net/virtio: fix indirect descriptor reconnection
drivers/net/virtio/virtqueue.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdint.h>

#include <rte_mbuf.h>

#include "virtqueue.h"
#include "virtio_logs.h"
#include "virtio.h"
#include "virtio_rxtx_simple.h"

/*
 * Two types of mbuf to be cleaned:
 * 1) mbuf that has been consumed by the backend but not yet reclaimed
 *    by the driver.
 * 2) mbuf that hasn't been consumed by the backend.
 */
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
{
        struct rte_mbuf *cookie;
        struct virtio_hw *hw;
        uint16_t start, end;
        int type, idx;

        if (vq == NULL)
                return NULL;

        hw = vq->hw;
        type = virtio_get_queue_type(hw, vq->vq_queue_index);
        start = vq->vq_avail_idx & (vq->vq_nentries - 1);
        end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);

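        /*
         * For vectorized Rx on a split ring, slots in [start, end) are the
         * free entries awaiting refill, so their sw_ring slots hold no
         * valid mbuf and are skipped below; any slot outside that window
         * may still carry an mbuf to detach.
         */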
        for (idx = 0; idx < vq->vq_nentries; idx++) {
                if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
                    type == VTNET_RQ) {
                        if (start <= end && idx >= start && idx < end)
                                continue;
                        if (start > end && (idx >= start || idx < end))
                                continue;
                        cookie = vq->sw_ring[idx];
                        if (cookie != NULL) {
                                vq->sw_ring[idx] = NULL;
                                return cookie;
                        }
                } else {
                        cookie = vq->vq_descx[idx].cookie;
                        if (cookie != NULL) {
                                vq->vq_descx[idx].cookie = NULL;
                                return cookie;
                        }
                }
        }

        return NULL;
}
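
/*
 * Typical teardown usage (hypothetical caller sketch): drain every mbuf
 * still attached to the queue, one at a time.
 *
 *      struct rte_mbuf *m;
 *
 *      while ((m = virtqueue_detach_unused(vq)) != NULL)
 *              rte_pktmbuf_free(m);
 */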

/* Flush used descriptors of a packed ring. */
static void
virtqueue_rxvq_flush_packed(struct virtqueue *vq)
{
        struct vq_desc_extra *dxp;
        uint16_t i;

        struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
        int cnt = 0;

        i = vq->vq_used_cons_idx;
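        /*
         * desc_is_used() compares the descriptor's AVAIL/USED flags
         * against used_wrap_counter; the counter is toggled each time the
         * used index wraps past the end of the ring, per the packed-ring
         * layout.
         */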
        while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
                dxp = &vq->vq_descx[descs[i].id];
                if (dxp->cookie != NULL) {
                        rte_pktmbuf_free(dxp->cookie);
                        dxp->cookie = NULL;
                }
                vq->vq_free_cnt++;
                vq->vq_used_cons_idx++;
                if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                        vq->vq_used_cons_idx -= vq->vq_nentries;
                        vq->vq_packed.used_wrap_counter ^= 1;
                }
                i = vq->vq_used_cons_idx;
        }
}

/* Flush the elements in the used ring. */
static void
virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
        struct virtnet_rx *rxq = &vq->rxq;
        struct virtio_hw *hw = vq->hw;
        struct vring_used_elem *uep;
        struct vq_desc_extra *dxp;
        uint16_t used_idx, desc_idx;
        uint16_t nb_used, i;

        nb_used = virtqueue_nused(vq);

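        /*
         * Three reclaim paths: vectorized Rx tracks mbufs in sw_ring by
         * descriptor index, in-order Rx returns descriptors one at a time
         * in ring order, and the default path walks the descriptor chain
         * referenced by each used-ring entry.
         */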
        for (i = 0; i < nb_used; i++) {
                used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
                uep = &vq->vq_split.ring.used->ring[used_idx];
                if (hw->use_vec_rx) {
                        desc_idx = used_idx;
                        rte_pktmbuf_free(vq->sw_ring[desc_idx]);
                        vq->vq_free_cnt++;
                } else if (hw->use_inorder_rx) {
                        desc_idx = (uint16_t)uep->id;
                        dxp = &vq->vq_descx[desc_idx];
                        if (dxp->cookie != NULL) {
                                rte_pktmbuf_free(dxp->cookie);
                                dxp->cookie = NULL;
                        }
                        vq_ring_free_inorder(vq, desc_idx, 1);
                } else {
                        desc_idx = (uint16_t)uep->id;
                        dxp = &vq->vq_descx[desc_idx];
                        if (dxp->cookie != NULL) {
                                rte_pktmbuf_free(dxp->cookie);
                                dxp->cookie = NULL;
                        }
                        vq_ring_free_chain(vq, desc_idx);
                }
                vq->vq_used_cons_idx++;
        }

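        /*
         * The vectorized Rx path expects the ring to stay populated, so
         * give the freed slots straight back to the device, rearming in
         * RTE_VIRTIO_VPMD_RX_REARM_THRESH-sized batches.
         */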
        if (hw->use_vec_rx) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxq);
                        if (virtqueue_kick_prepare(vq))
                                virtqueue_notify(vq);
                }
        }
}

/* Flush the elements in the used ring. */
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
        struct virtio_hw *hw = vq->hw;

        if (virtio_with_packed_queue(hw))
                virtqueue_rxvq_flush_packed(vq);
        else
                virtqueue_rxvq_flush_split(vq);
}

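/*
 * Reset a packed Rx virtqueue to its post-initialization state, e.g. when
 * the device has to be set up again after a backend reconnection.
 */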
int
virtqueue_rxvq_reset_packed(struct virtqueue *vq)
{
        int size = vq->vq_nentries;
        struct vq_desc_extra *dxp;
        struct virtnet_rx *rxvq;
        uint16_t desc_idx;

        vq->vq_used_cons_idx = 0;
        vq->vq_desc_head_idx = 0;
        vq->vq_avail_idx = 0;
        vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
        vq->vq_free_cnt = vq->vq_nentries;

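        /*
         * The driver's first pass over a freshly reset ring uses wrap
         * counter 1, so newly exposed descriptors carry the AVAIL flag;
         * Rx buffers are device-writable, hence VRING_DESC_F_WRITE.
         */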
        vq->vq_packed.used_wrap_counter = 1;
        vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
        vq->vq_packed.event_flags_shadow = 0;
        vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;

        rxvq = &vq->rxq;
        memset(rxvq->mz->addr, 0, rxvq->mz->len);

        for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
                dxp = &vq->vq_descx[desc_idx];
                if (dxp->cookie != NULL) {
                        rte_pktmbuf_free(dxp->cookie);
                        dxp->cookie = NULL;
                }
        }

        vring_desc_init_packed(vq, size);

        virtqueue_disable_intr(vq);
        return 0;
}

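/*
 * Reset a packed Tx virtqueue. Besides restoring the ring state, the
 * indirect descriptor tables in the Tx header memzone must be rebuilt,
 * since the memset below wipes them along with the headers.
 */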
int
virtqueue_txvq_reset_packed(struct virtqueue *vq)
{
        int size = vq->vq_nentries;
        struct vq_desc_extra *dxp;
        struct virtnet_tx *txvq;
        uint16_t desc_idx;
        struct virtio_tx_region *txr;
        struct vring_packed_desc *start_dp;

        vq->vq_used_cons_idx = 0;
        vq->vq_desc_head_idx = 0;
        vq->vq_avail_idx = 0;
        vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
        vq->vq_free_cnt = vq->vq_nentries;

        vq->vq_packed.used_wrap_counter = 1;
        vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
        vq->vq_packed.event_flags_shadow = 0;

        txvq = &vq->txq;
        txr = txvq->virtio_net_hdr_mz->addr;
        memset(txvq->mz->addr, 0, txvq->mz->len);
        memset(txvq->virtio_net_hdr_mz->addr, 0,
                txvq->virtio_net_hdr_mz->len);

        for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
                dxp = &vq->vq_descx[desc_idx];
                if (dxp->cookie != NULL) {
                        rte_pktmbuf_free(dxp->cookie);
                        dxp->cookie = NULL;
                }

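                /*
                 * Re-link each indirect descriptor table to its Tx header;
                 * the headers live in the same memzone, one region per
                 * descriptor slot. Skipping this after a reset would leave
                 * the zeroed tables behind, breaking Tx with indirect
                 * descriptors after reconnection.
                 */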
                if (virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC)) {
                        /* first indirect descriptor is always the tx header */
                        start_dp = txr[desc_idx].tx_packed_indir;
                        vring_desc_init_indirect_packed(start_dp,
                                        RTE_DIM(txr[desc_idx].tx_packed_indir));
                        start_dp->addr = txvq->virtio_net_hdr_mem
                                         + desc_idx * sizeof(*txr)
                                         + offsetof(struct virtio_tx_region, tx_hdr);
                        start_dp->len = vq->hw->vtnet_hdr_size;
                }
        }

        vring_desc_init_packed(vq, size);

        virtqueue_disable_intr(vq);
        return 0;
}