drivers/net/ice/ice_rxtx_vec_common.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "ice_rxtx.h"

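/*
 * Reassemble packets that the vector Rx path received as multiple mbufs.
 * split_flags marks buffers whose packet continues in the next buffer.
 * Segments are chained, the CRC is stripped from the packet length and,
 * if needed, from the last segment; completed packets are compacted back
 * into rx_bufs. A packet still in progress at the end of the burst is
 * saved in rxq->pkt_first_seg/pkt_last_seg for the next call.
 * Returns the number of completed packets written to rx_bufs.
 */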
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
                          uint16_t nb_bufs, uint8_t *split_flags)
{
        struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /* finished pkts */
        struct rte_mbuf *start = rxq->pkt_first_seg;
        struct rte_mbuf *end = rxq->pkt_last_seg;
        unsigned int pkt_idx, buf_idx;

        for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
                if (end) {
                        /* processing a split packet */
                        end->next = rx_bufs[buf_idx];
                        rx_bufs[buf_idx]->data_len += rxq->crc_len;

                        start->nb_segs++;
                        start->pkt_len += rx_bufs[buf_idx]->data_len;
                        end = end->next;

                        if (!split_flags[buf_idx]) {
                                /* it's the last packet of the set */
                                start->hash = end->hash;
                                start->vlan_tci = end->vlan_tci;
                                start->ol_flags = end->ol_flags;
                                /* we need to strip crc for the whole packet */
                                start->pkt_len -= rxq->crc_len;
                                if (end->data_len > rxq->crc_len) {
                                        end->data_len -= rxq->crc_len;
                                } else {
                                        /* free up last mbuf */
                                        struct rte_mbuf *secondlast = start;

                                        start->nb_segs--;
                                        while (secondlast->next != end)
                                                secondlast = secondlast->next;
                                        secondlast->data_len -= (rxq->crc_len -
                                                        end->data_len);
                                        secondlast->next = NULL;
                                        rte_pktmbuf_free_seg(end);
                                }
                                pkts[pkt_idx++] = start;
                                start = NULL;
                                end = NULL;
                        }
                } else {
                        /* not processing a split packet */
                        if (!split_flags[buf_idx]) {
                                /* not a split packet, save and skip */
                                pkts[pkt_idx++] = rx_bufs[buf_idx];
                                continue;
                        }
                        start = rx_bufs[buf_idx];
                        end = start;
                        rx_bufs[buf_idx]->data_len += rxq->crc_len;
                        rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
                }
        }

        /* save the partial packet for next time */
        rxq->pkt_first_seg = start;
        rxq->pkt_last_seg = end;
        rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
        return pkt_idx;
}

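/*
 * Free transmitted mbufs once the descriptor at tx_next_dd reports DONE.
 * Up to tx_rs_thresh entries of the software ring are released; mbufs
 * from the same mempool are returned in bulk via rte_mempool_put_bulk().
 * Returns tx_rs_thresh on success, 0 if the descriptor is not yet done.
 */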
static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
        struct ice_tx_entry *txep;
        uint32_t n;
        uint32_t i;
        int nb_free = 0;
        struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];

        /* check DD bits on threshold descriptor */
        if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
                        rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
                        rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
                return 0;

        n = txq->tx_rs_thresh;

        /* first buffer to free from S/W ring is at index
         * tx_next_dd - (tx_rs_thresh - 1)
         */
        txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
        m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
        if (likely(m)) {
                free[0] = m;
                nb_free = 1;
                for (i = 1; i < n; i++) {
                        m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
                        if (likely(m)) {
                                if (likely(m->pool == free[0]->pool)) {
                                        free[nb_free++] = m;
                                } else {
                                        rte_mempool_put_bulk(free[0]->pool,
                                                             (void *)free,
                                                             nb_free);
                                        free[0] = m;
                                        nb_free = 1;
                                }
                        }
                }
                rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
        } else {
                for (i = 1; i < n; i++) {
                        m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
                        if (m)
                                rte_mempool_put(m->pool, m);
                }
        }

        /* buffers were freed, update counters */
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
        txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
        if (txq->tx_next_dd >= txq->nb_tx_desc)
                txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

        return txq->tx_rs_thresh;
}

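/*
 * Record the mbufs of a Tx burst in the software ring so that
 * ice_tx_free_bufs() can release them once transmission completes.
 */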
static __rte_always_inline void
ice_tx_backlog_entry(struct ice_tx_entry *txep,
                     struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        int i;

        for (i = 0; i < (int)nb_pkts; ++i)
                txep[i].mbuf = tx_pkts[i];
}

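/*
 * Release all mbufs still held by the vector Rx software ring, e.g. on
 * queue stop or release. Handles both a fully armed ring and the case
 * where part of the ring is waiting to be rearmed, then marks the whole
 * ring as needing rearm and clears the software ring entries.
 */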
static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
        const unsigned int mask = rxq->nb_rx_desc - 1;
        unsigned int i;

        if (unlikely(!rxq->sw_ring)) {
                PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
                return;
        }

        if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
                return;

        /* free all mbufs that are valid in the ring */
        if (rxq->rxrearm_nb == 0) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
                        if (rxq->sw_ring[i].mbuf)
                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                }
        } else {
                for (i = rxq->rx_tail;
                     i != rxq->rxrearm_start;
                     i = (i + 1) & mask) {
                        if (rxq->sw_ring[i].mbuf)
                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                }
        }

        rxq->rxrearm_nb = rxq->nb_rx_desc;

        /* set all entries to NULL */
        memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

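/*
 * Release all mbufs still referenced by the vector Tx software ring.
 * Only the range between the last cleaned position and tx_tail is walked,
 * since the vector Tx free path leaves stale pointers behind elsewhere.
 */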
static inline void
_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
{
        uint16_t i;

        if (unlikely(!txq || !txq->sw_ring)) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
                return;
        }

        /*
         * vPMD Tx does not set sw_ring mbuf pointers to NULL after freeing,
         * so the remaining entries must be released carefully.
         */
        i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
        if (txq->tx_tail < i) {
                for (; i < txq->nb_tx_desc; i++) {
                        rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                        txq->sw_ring[i].mbuf = NULL;
                }
                i = 0;
        }
        for (; i < txq->tx_tail; i++) {
                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                txq->sw_ring[i].mbuf = NULL;
        }
}

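/*
 * Build the 64-bit mbuf initializer used by the vector Rx rearm code:
 * a template of the rearm_data region (data_off, refcnt, nb_segs, port)
 * that can be written to each newly allocated mbuf in a single store.
 */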
static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
        uintptr_t p;
        struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

        mb_def.nb_segs = 1;
        mb_def.data_off = RTE_PKTMBUF_HEADROOM;
        mb_def.port = rxq->port_id;
        rte_mbuf_refcnt_set(&mb_def, 1);

        /* prevent compiler reordering: rearm_data covers previous fields */
        rte_compiler_barrier();
        p = (uintptr_t)&mb_def.rearm_data;
        rxq->mbuf_initializer = *(uint64_t *)p;
        return 0;
}

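/*
 * Per-queue check for vector Rx: the descriptor count must be a power of
 * two and a multiple of the free threshold, the free threshold must cover
 * at least one vector burst, and protocol extraction must be disabled.
 * Returns 0 if the queue can use the vector path, -1 otherwise.
 */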
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
        if (!rxq)
                return -1;

        if (!rte_is_power_of_2(rxq->nb_rx_desc))
                return -1;

        if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
                return -1;

        if (rxq->nb_rx_desc % rxq->rx_free_thresh)
                return -1;

        if (rxq->proto_xtr != PROTO_XTR_NONE)
                return -1;

        return 0;
}

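/* Tx offloads that the vector Tx path cannot handle */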
#define ICE_NO_VECTOR_FLAGS (                            \
                DEV_TX_OFFLOAD_MULTI_SEGS |              \
                DEV_TX_OFFLOAD_VLAN_INSERT |             \
                DEV_TX_OFFLOAD_SCTP_CKSUM |              \
                DEV_TX_OFFLOAD_UDP_CKSUM |               \
                DEV_TX_OFFLOAD_TCP_TSO |                 \
                DEV_TX_OFFLOAD_TCP_CKSUM)

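/*
 * Per-queue check for vector Tx: none of the unsupported offloads may be
 * enabled and tx_rs_thresh must fit the vector burst and free-buffer limits.
 * Returns 0 if the queue can use the vector path, -1 otherwise.
 */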
static inline int
ice_tx_vec_queue_default(struct ice_tx_queue *txq)
{
        if (!txq)
                return -1;

        if (txq->offloads & ICE_NO_VECTOR_FLAGS)
                return -1;

        if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
            txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
                return -1;

        return 0;
}

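/*
 * Device-level check for vector Rx: flow mark support is not available on
 * the vector path, and every Rx queue must pass the per-queue check.
 */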
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
        int i;
        struct ice_rx_queue *rxq;
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

        /* vPMD does not support flow mark. */
        if (ad->devargs.flow_mark_support)
                return -1;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                if (ice_rx_vec_queue_default(rxq))
                        return -1;
        }

        return 0;
}

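/*
 * Device-level check for vector Tx: every Tx queue must pass the
 * per-queue check.
 */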
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
        int i;
        struct ice_tx_queue *txq;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                if (ice_tx_vec_queue_default(txq))
                        return -1;
        }

        return 0;
}

#endif /* _ICE_RXTX_VEC_COMMON_H_ */