net/enetc: relax read for Tx CI in clean Tx
[dpdk.git] drivers/net/enetc/enetc_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */

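/* Free the mbufs of transmitted frames up to the consumer index last reported
 * by hardware and advance next_to_clean; returns the number of frames cleaned.
 */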
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
        int tx_frm_cnt = 0;
        struct enetc_swbd *tx_swbd;
        int i, hwci;

        /* we don't need barriers here, we just want a relatively current value
         * from HW.
         */
        hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
                     ENETC_TBCISR_IDX_MASK);

        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];

        /* we're only reading the CI index once here, which means HW may update
         * it while we're doing clean-up.  We could read the register in a loop
         * but for now I assume it's OK to leave a few Tx frames for the next
         * call.  The issue with reading the register in a loop is that we'd be
         * stalling here trying to catch up with HW, which keeps sending
         * traffic as long as it has traffic to send; in effect we could end up
         * waiting for the Tx ring to be drained by HW instead of doing Rx in
         * the meantime.
         */
        while (i != hwci) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[0];
                }

                tx_frm_cnt++;
        }

        tx_ring->next_to_clean = i;
        return tx_frm_cnt;
}

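/* Tx burst: reclaim completed descriptors, fill one BD per packet, then ring
 * the producer index register; returns the number of packets queued.
 */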
uint16_t
enetc_xmit_pkts(void *tx_queue,
                struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        struct enetc_swbd *tx_swbd;
        int i, start, bds_to_use;
        struct enetc_tx_bd *txbd;
        struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

        i = tx_ring->next_to_use;

        bds_to_use = enetc_bd_unused(tx_ring);
        if (bds_to_use < nb_pkts)
                nb_pkts = bds_to_use;

        start = 0;
        while (nb_pkts--) {
                enetc_clean_tx_ring(tx_ring);
                tx_swbd = &tx_ring->q_swbd[i];
                tx_swbd->buffer_addr = tx_pkts[start];
                txbd = ENETC_TXBD(*tx_ring, i);
                txbd->frm_len = tx_pkts[start]->pkt_len;
                txbd->buf_len = txbd->frm_len;
                txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
                txbd->addr = rte_cpu_to_le_64(tx_swbd->buffer_addr->buf_iova +
                                              tx_swbd->buffer_addr->data_off);
                i++;
                start++;
                if (unlikely(i == tx_ring->bd_count))
                        i = 0;
        }

        tx_ring->next_to_use = i;
        enetc_wr_reg(tx_ring->tcir, i);
        return start;
}

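/* Allocate up to buff_cnt fresh mbufs, program their buffer addresses into
 * the Rx BDs starting at next_to_use, and publish the new producer index;
 * returns the number of descriptors actually refilled.
 */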
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
        struct enetc_swbd *rx_swbd;
        union enetc_rx_bd *rxbd;
        int i, j;

        i = rx_ring->next_to_use;
        rx_swbd = &rx_ring->q_swbd[i];
        rxbd = ENETC_RXBD(*rx_ring, i);
        for (j = 0; j < buff_cnt; j++) {
                /* keep the raw CPU pointer in the software ring; only the
                 * descriptor field below is in bus (LE) byte order.
                 */
                rx_swbd->buffer_addr = rte_pktmbuf_alloc(rx_ring->mb_pool);
                if (unlikely(rx_swbd->buffer_addr == NULL))
                        break;
                rxbd->w.addr = rte_cpu_to_le_64(rx_swbd->buffer_addr->buf_iova +
                                                rx_swbd->buffer_addr->data_off);
                /* clear 'R' as well */
                rxbd->r.lstatus = 0;
                rx_swbd++;
                rxbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        rxbd = ENETC_RXBD(*rx_ring, 0);
                        rx_swbd = &rx_ring->q_swbd[i];
                }
        }

        if (likely(j)) {
                rx_ring->next_to_alloc = i;
                rx_ring->next_to_use = i;
                enetc_wr_reg(rx_ring->rcir, i);
        }

        return j;
}

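/* Fallback for parse results carrying the error bit: set the packet type from
 * the L3/L4 classification and flag the corresponding checksum as bad.
 */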
static inline void enetc_slow_parsing(struct rte_mbuf *m,
                                      uint64_t parse_results)
{
        m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

        switch (parse_results) {
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4;
                m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6;
                m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_TCP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_TCP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_UDP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_UDP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_SCTP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_SCTP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_ICMP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_ICMP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        /* More switch cases can be added */
        default:
                m->packet_type = RTE_PTYPE_UNKNOWN;
                m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
                               PKT_RX_L4_CKSUM_UNKNOWN;
        }
}

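/* Fast-path translation of the hardware parse summary into an mbuf packet
 * type; checksums are marked good up front and downgraded by
 * enetc_slow_parsing() for error cases.
 */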
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
        ENETC_PMD_DP_DEBUG("parse summary = 0x%x   ", parse_results);
        m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

        switch (parse_results) {
        case ENETC_PKT_TYPE_ETHER:
                m->packet_type = RTE_PTYPE_L2_ETHER;
                return;
        case ENETC_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4;
                return;
        case ENETC_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6;
                return;
        case ENETC_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_TCP;
                return;
        case ENETC_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_TCP;
                return;
        case ENETC_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_UDP;
                return;
        case ENETC_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_UDP;
                return;
        case ENETC_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_SCTP;
                return;
        case ENETC_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_SCTP;
                return;
        case ENETC_PKT_TYPE_IPV4_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_ICMP;
                return;
        case ENETC_PKT_TYPE_IPV6_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_ICMP;
                return;
        /* More switch cases can be added */
        default:
                enetc_slow_parsing(m, parse_results);
        }
}

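/* Harvest up to work_limit ready Rx descriptors into rx_pkts, refilling the
 * ring in ENETC_RXBD_BUNDLE batches as buffers are consumed; returns the
 * number of frames received.
 */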
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                    struct rte_mbuf **rx_pkts,
                    int work_limit)
{
        int rx_frm_cnt = 0;
        int cleaned_cnt, i;
        struct enetc_swbd *rx_swbd;

        cleaned_cnt = enetc_bd_unused(rx_ring);
        /* next descriptor to process */
        i = rx_ring->next_to_clean;
        rx_swbd = &rx_ring->q_swbd[i];
        while (likely(rx_frm_cnt < work_limit)) {
                union enetc_rx_bd *rxbd;
                uint32_t bd_status;

                if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
                        int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

                        cleaned_cnt -= count;
                }

                rxbd = ENETC_RXBD(*rx_ring, i);
                bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
                if (!bd_status)
                        break;

                rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
                                                rx_ring->crc_len;
                rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
                                                 rx_ring->crc_len;
                rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
                rx_swbd->buffer_addr->ol_flags = 0;
                enetc_dev_rx_parse(rx_swbd->buffer_addr,
                                   rxbd->r.parse_summary);
                rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
                cleaned_cnt++;
                rx_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        rx_swbd = &rx_ring->q_swbd[i];
                }

                rx_ring->next_to_clean = i;
                rx_frm_cnt++;
        }

        return rx_frm_cnt;
}

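/* Rx burst entry point: thin wrapper around enetc_clean_rx_ring(). */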
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

        return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}