drivers/net/i40e/i40e_rxtx_vec_common.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "i40e_ethdev.h"
#include "i40e_rxtx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip CRC for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * sizeof(*pkts));
	return pkt_idx;
}
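
/*
 * Usage sketch (illustration only, not part of this header): the
 * arch-specific scattered Rx burst functions call reassemble_packets()
 * after the raw vector burst has filled split_flags. The raw burst
 * routine named below is a placeholder; the real one is per-arch
 * (SSE/AVX2/NEON/AltiVec).
 *
 *	uint16_t nb_bufs, i = 0;
 *	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
 *
 *	nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, split_flags);
 *	if (rxq->pkt_first_seg == NULL) {
 *		// find the first split packet, reassemble from there on
 *		while (i < nb_bufs && !split_flags[i])
 *			i++;
 *		if (i == nb_bufs)
 *			return nb_bufs;	// nothing was split
 *		rxq->pkt_first_seg = rx_pkts[i];
 *	}
 *	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
 *				      &split_flags[i]);
 */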
80
81 static __rte_always_inline int
82 i40e_tx_free_bufs(struct i40e_tx_queue *txq)
83 {
84         struct i40e_tx_entry *txep;
85         uint32_t n;
86         uint32_t i;
87         int nb_free = 0;
88         struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
89
90         /* check DD bits on threshold descriptor */
91         if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
92                         rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
93                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
94                 return 0;
95
96         n = txq->tx_rs_thresh;
97
98          /* first buffer to free from S/W ring is at index
99           * tx_next_dd - (tx_rs_thresh-1)
100           */
101         txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
102
103         if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
104                 for (i = 0; i < n; i++) {
105                         free[i] = txep[i].mbuf;
106                         /* no need to reset txep[i].mbuf in vector path */
107                 }
108                 rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
109                 goto done;
110         }
111
112         m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
113         if (likely(m != NULL)) {
114                 free[0] = m;
115                 nb_free = 1;
116                 for (i = 1; i < n; i++) {
117                         m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
118                         if (likely(m != NULL)) {
119                                 if (likely(m->pool == free[0]->pool)) {
120                                         free[nb_free++] = m;
121                                 } else {
122                                         rte_mempool_put_bulk(free[0]->pool,
123                                                              (void *)free,
124                                                              nb_free);
125                                         free[0] = m;
126                                         nb_free = 1;
127                                 }
128                         }
129                 }
130                 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
131         } else {
132                 for (i = 1; i < n; i++) {
133                         m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
134                         if (m != NULL)
135                                 rte_mempool_put(m->pool, m);
136                 }
137         }
138
139 done:
140         /* buffers were freed, update counters */
141         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
142         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
143         if (txq->tx_next_dd >= txq->nb_tx_desc)
144                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
145
146         return txq->tx_rs_thresh;
147 }
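
/*
 * Worked example (assumed values, for illustration): with nb_tx_desc = 512
 * and tx_rs_thresh = 32, tx_next_dd starts at 31. Once descriptor 31
 * reports DESC_DONE, entries sw_ring[0..31] are freed in one call and
 * tx_next_dd advances to 63; after descriptor 511 it wraps back to 31.
 * Callers in the vector Tx path typically gate it on the free threshold:
 *
 *	if (txq->nb_tx_free < txq->tx_free_thresh)
 *		i40e_tx_free_bufs(txq);
 */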

static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
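
/*
 * Usage sketch (illustration only): before writing descriptors, the vector
 * Tx burst records the mbufs it is about to enqueue so that
 * i40e_tx_free_bufs() can release them once the hardware is done:
 *
 *	tx_backlog_entry(&txq->sw_ring[tx_id], tx_pkts, nb_commit);
 */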

static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
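
/*
 * Note on the mask: the vector paths require nb_rx_desc to be a power of
 * two (checked in i40e_rx_vec_dev_conf_condition_check_default() below),
 * so "i = (i + 1) & mask" is a cheap modulo. For example, with
 * nb_rx_desc = 512, mask = 0x1ff and index 511 wraps to 0.
 */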

static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
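
/*
 * Usage sketch (illustration only): mbuf_initializer is the 8-byte image of
 * the rearm_data region (refcnt, nb_segs, port, data_off), so Rx rearm code
 * can reinitialize an mbuf with one 64-bit store instead of four field
 * writes, along the lines of:
 *
 *	*(uint64_t *)&mb->rearm_data = rxq->mbuf_initializer;
 *
 * The SIMD paths do the equivalent with vector stores.
 */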

static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
	struct i40e_rx_queue *rxq;
	uint16_t desc, i;
	bool first_queue;

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/* no header split support */
	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
		return -1;

	/* no QinQ support */
	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	/* Vector mode is allowed only when the number of Rx queue
	 * descriptors is a power of 2.
	 */
	if (!dev->data->dev_started) {
		first_queue = true;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			if (first_queue)
				ad->rx_vec_allowed =
					rte_is_power_of_2(desc);
			else
				/* stay disallowed once any queue fails */
				ad->rx_vec_allowed = ad->rx_vec_allowed &&
					rte_is_power_of_2(desc);
			first_queue = false;
		}
	} else {
		/* Only check the first queue's descriptor number */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			if (!rxq)
				continue;
			desc = rxq->nb_rx_desc;
			ad->rx_vec_allowed = rte_is_power_of_2(desc);
			break;
		}
	}

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}
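
/*
 * Usage sketch (illustration only): the per-arch vector files wrap this
 * default rather than reimplementing the checks, e.g.:
 *
 *	int __rte_cold
 *	i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 *	{
 *		return i40e_rx_vec_dev_conf_condition_check_default(dev);
 *	}
 */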

#endif /* _I40E_RXTX_VEC_COMMON_H_ */