net/enetc: do not stall in clean Tx ring
[dpdk.git] drivers/net/enetc/enetc_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */

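/* Free the mbufs of all Tx BDs that hardware has completed, i.e. all
 * BDs between next_to_clean and the consumer index (CI) snapshot read
 * below. Returns the number of frames cleaned.
 */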
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd;
	int i, hwci;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];

	hwci = (int)(enetc_rd_reg(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);
	/* We're only reading the CI index once here, which means HW may
	 * update it while we're doing clean-up. We could read the register
	 * in a loop, but for now I assume it's OK to leave a few Tx frames
	 * for the next call. The issue with reading the register in a loop
	 * is that we'd be stalling here trying to catch up with HW, which
	 * keeps sending traffic as long as it has traffic to send, so in
	 * effect we could be waiting here for HW to drain the Tx ring
	 * instead of doing Rx in the meantime.
	 */
	while (i != hwci) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}

		tx_frm_cnt++;
	}

	tx_ring->next_to_clean = i;
	return tx_frm_cnt;
}

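/* Transmit a burst of up to nb_pkts packets, one BD per packet. The
 * burst is trimmed to the number of free BDs, and hardware is notified
 * with a single write to the ring index register (tcir) at the end.
 */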
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

	start = 0;
	while (nb_pkts--) {
		enetc_clean_tx_ring(tx_ring);
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
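		/* each frame here occupies a single BD, so mark it as the
		 * final (F) BD of its frame
		 */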
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = (uint64_t)(uintptr_t)
			rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
					 tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}

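/* Allocate fresh mbufs for up to buff_cnt Rx BDs starting at
 * next_to_use, then publish them to hardware by updating the ring
 * index register (rcir). Returns the number of BDs refilled.
 */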
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		/* buffer_addr is a CPU-side pointer, so no endianness
		 * conversion is needed; bail out early rather than
		 * dereference a NULL mbuf if the pool is exhausted.
		 */
		rx_swbd->buffer_addr = rte_pktmbuf_alloc(rx_ring->mb_pool);
		if (unlikely(rx_swbd->buffer_addr == NULL))
			break;
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

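/* Slow path for parse results not covered by the fast cases in
 * enetc_dev_rx_parse(), mostly frames flagged with ENETC_PARSE_ERROR:
 * set the packet type and mark the failing checksum as bad, or
 * everything as unknown.
 */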
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}

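/* Translate the hardware parse summary of a received frame into the
 * mbuf packet type and checksum flags; error and unknown summaries are
 * handed off to enetc_slow_parsing().
 */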
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}

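/* Receive up to work_limit frames: walk the ring from next_to_clean
 * until the first BD not yet written back by hardware, refilling
 * consumed BDs in bundles along the way.
 */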
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

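		/* refill in bundles of at least ENETC_RXBD_BUNDLE free BDs
		 * to amortize the cost of the refill loop and register write
		 */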
		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			cleaned_cnt -= count;
		}

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

	return rx_frm_cnt;
}

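/* Burst Rx entry point of the PMD, a thin wrapper around
 * enetc_clean_rx_ring().
 */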
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}