/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdint.h>

#include <rte_mbuf.h>

#include "virtqueue.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtio_rxtx_simple.h"

/*
 * Two types of mbuf to be cleaned:
 * 1) mbuf that has been consumed by backend but not used by virtio.
 * 2) mbuf that hasn't been consumed by backend.
 */
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
{
	struct rte_mbuf *cookie;
	struct virtio_hw *hw;
	uint16_t start, end;
	int type, idx;

	if (vq == NULL)
		return NULL;

	hw = vq->hw;
	type = virtio_get_queue_type(hw, vq->vq_queue_index);
	start = vq->vq_avail_idx & (vq->vq_nentries - 1);
	end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);

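	/*
	 * For the split vectorized Rx path, indexes in [start, end),
	 * i.e. the vq_free_cnt free slots starting at vq_avail_idx
	 * (wrapping at vq_nentries), hold no in-flight mbuf in sw_ring
	 * and are skipped below.
	 */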
	for (idx = 0; idx < vq->vq_nentries; idx++) {
		if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
		    type == VTNET_RQ) {
			if (start <= end && idx >= start && idx < end)
				continue;
			if (start > end && (idx >= start || idx < end))
				continue;
			cookie = vq->sw_ring[idx];
			if (cookie != NULL) {
				vq->sw_ring[idx] = NULL;
				return cookie;
			}
		} else {
			cookie = vq->vq_descx[idx].cookie;
			if (cookie != NULL) {
				vq->vq_descx[idx].cookie = NULL;
				return cookie;
			}
		}
	}

	return NULL;
}

/* Flush used descriptors of a packed Rx virtqueue. */
static void
virtqueue_rxvq_flush_packed(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	uint16_t i;

	struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
	int cnt = 0;

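	/*
	 * desc_is_used() compares the descriptor's avail/used flag bits
	 * with used_wrap_counter; cnt bounds the walk to at most one
	 * pass over the ring.
	 */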
	i = vq->vq_used_cons_idx;
	while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
		dxp = &vq->vq_descx[descs[i].id];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		i = vq->vq_used_cons_idx;
	}
}

/* Flush the elements in the used ring. */
static void
virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
	struct virtnet_rx *rxq = &vq->rxq;
	struct virtio_hw *hw = vq->hw;
	struct vring_used_elem *uep;
	struct vq_desc_extra *dxp;
	uint16_t used_idx, desc_idx;
	uint16_t nb_used, i;

	nb_used = virtqueue_nused(vq);

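	/*
	 * Reclaim each used entry through the path that filled it:
	 * sw_ring for vectorized Rx, in-order descriptor release for
	 * in-order Rx, and per-chain release otherwise.
	 */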
	for (i = 0; i < nb_used; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		uep = &vq->vq_split.ring.used->ring[used_idx];
		if (hw->use_vec_rx) {
			desc_idx = used_idx;
			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
			vq->vq_free_cnt++;
		} else if (hw->use_inorder_rx) {
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_inorder(vq, desc_idx, 1);
		} else {
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_chain(vq, desc_idx);
		}
		vq->vq_used_cons_idx++;
	}

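	/*
	 * The vectorized Rx path rearms descriptors in fixed batches;
	 * keep refilling until fewer than RTE_VIRTIO_VPMD_RX_REARM_THRESH
	 * slots remain free, kicking the device when required.
	 */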
	if (hw->use_vec_rx) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxq);
			if (virtqueue_kick_prepare(vq))
				virtqueue_notify(vq);
		}
	}
}

/* Flush the elements in the used ring. */
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
	struct virtio_hw *hw = vq->hw;

	if (vtpci_packed_queue(hw))
		virtqueue_rxvq_flush_packed(vq);
	else
		virtqueue_rxvq_flush_split(vq);
}

int
virtqueue_rxvq_reset_packed(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vq_desc_extra *dxp;
	struct virtnet_rx *rxvq;
	uint16_t desc_idx;

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

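	/*
	 * A reset ring restarts in its first wrap: the driver's wrap
	 * counter is 1 and cached_flags seeds the avail bit, plus
	 * VRING_DESC_F_WRITE since the device writes into Rx buffers.
	 */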
	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;
	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;

	rxvq = &vq->rxq;
	memset(rxvq->mz->addr, 0, rxvq->mz->len);

	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vring_desc_init_packed(vq, size);

	virtqueue_disable_intr(vq);
	return 0;
}

int
virtqueue_txvq_reset_packed(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vq_desc_extra *dxp;
	struct virtnet_tx *txvq;
	uint16_t desc_idx;

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;

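	/*
	 * Tx also clears the memzone holding the virtio-net headers that
	 * are prepended to each packet; no VRING_DESC_F_WRITE here since
	 * the device only reads Tx buffers.
	 */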
	txvq = &vq->txq;
	memset(txvq->mz->addr, 0, txvq->mz->len);
	memset(txvq->virtio_net_hdr_mz->addr, 0,
		txvq->virtio_net_hdr_mz->len);

	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vring_desc_init_packed(vq, size);

	virtqueue_disable_intr(vq);
	return 0;
}