net/enetc: batch process clean Tx ring calls
drivers/net/enetc/enetc_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */

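/* Free the mbufs attached to Tx BDs that hardware has completed, i.e. every
 * descriptor between next_to_clean and the consumer index read back from HW.
 * Returns the number of frames cleaned.
 */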
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd;
	int i, hwci;

	/* we don't need barriers here, we just want a relatively current value
	 * from HW.
	 */
	hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];

	/* we're only reading the CI index once here, which means HW may update
	 * it while we're doing clean-up. We could read the register in a loop,
	 * but for now assume it's OK to leave a few Tx frames for the next
	 * call. The issue with reading the register in a loop is that we'd be
	 * stalling here trying to catch up with HW, which keeps sending
	 * traffic as long as it has traffic to send; in effect we could be
	 * waiting for HW to drain the Tx ring instead of doing Rx in the
	 * meantime.
	 */
	while (i != hwci) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}

		tx_frm_cnt++;
	}

	tx_ring->next_to_clean = i;
	return tx_frm_cnt;
}

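/* Burst Tx: one BD per frame (no scatter-gather), with the request clipped
 * to the number of free BDs. Fills the software and hardware descriptors,
 * reclaims previously completed frames, then rings the Tx doorbell with the
 * new producer index. Returns the number of frames accepted.
 */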
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

	start = 0;
	while (nb_pkts--) {
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		/* single BD per frame, so mark it as the final one */
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = rte_cpu_to_le_64(tx_swbd->buffer_addr->buf_iova +
					      tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	/* we're only cleaning up the Tx ring here, on the assumption that
	 * software is slower than hardware and hardware completed sending
	 * older frames out by now.
	 * We're also cleaning up the ring before kicking off Tx for the new
	 * batch to minimize chances of contention on the Tx ring.
	 */
	enetc_clean_tx_ring(tx_ring);

	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}

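/* Allocate up to buff_cnt fresh mbufs, attach them to Rx BDs starting at
 * next_to_use, then publish the new producer index to hardware. Returns the
 * number of buffers actually placed on the ring.
 */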
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		rx_swbd->buffer_addr = rte_pktmbuf_alloc(rx_ring->mb_pool);
		/* stop refilling if the mempool is exhausted */
		if (unlikely(rx_swbd->buffer_addr == NULL))
			break;
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

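/* Fallback parser for results the fast path doesn't recognize, mainly the
 * ENETC_PARSE_ERROR variants: set the packet type from the hardware L3/L4
 * classification and flag the checksum that failed as bad.
 */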
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}

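/* Map the hardware parse summary to an mbuf packet type. The common cases
 * assume good checksums; anything else, including parse errors, is deferred
 * to enetc_slow_parsing().
 */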
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}

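/* Receive up to work_limit completed frames, handing the attached mbufs to
 * the caller and refilling the ring in ENETC_RXBD_BUNDLE batches along the
 * way. Returns the number of frames received.
 */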
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		/* refill in bundles so the producer index (and its register
		 * write) is updated once per ENETC_RXBD_BUNDLE BDs rather
		 * than per frame
		 */
		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			cleaned_cnt -= count;
		}

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

	return rx_frm_cnt;
}

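/* Burst Rx entry point installed as the ethdev rx_pkt_burst callback. */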
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}
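
/*
 * Usage sketch (illustrative only, not part of this file): once the PMD
 * registers the burst handlers above, applications reach them through the
 * generic ethdev API, e.g. a polling loop that forwards Rx traffic back out:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, nb_tx;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
 *	while (nb_tx < nb_rx)
 *		rte_pktmbuf_free(pkts[nb_tx++]);
 */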