drivers/net/enetc/enetc_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 16 /* Number of buffers to allocate at once */

static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
        int tx_frm_cnt = 0;
        struct enetc_swbd *tx_swbd;
        int i, hwci;

        /* we don't need barriers here, we just want a relatively current value
         * from HW.
         */
        hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
                     ENETC_TBCISR_IDX_MASK);

        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];

        /* we're only reading the CI index once here, which means HW may update
         * it while we're doing clean-up.  We could read the register in a loop
         * but for now I assume it's OK to leave a few Tx frames for next call.
         * The issue with reading the register in a loop is that we're stalling
         * here trying to catch up with HW which keeps sending traffic as long
         * as it has traffic to send, so in effect we could be waiting here for
         * the Tx ring to be drained by HW, instead of us doing Rx in that
         * meantime.
         */
        while (i != hwci) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[0];
                }

                tx_frm_cnt++;
        }

        tx_ring->next_to_clean = i;
        return tx_frm_cnt;
}
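
/*
 * Both burst paths below size their work with enetc_bd_unused() from
 * enetc.h, which counts free BDs between the consumer and producer
 * indices. A minimal sketch of the usual ring arithmetic, shown purely
 * for illustration (an assumption, not a copy of that header):
 *
 *	static inline int
 *	bd_unused_sketch(const struct enetc_bdr *r)
 *	{
 *		// one BD is kept unused so "full" and "empty" differ
 *		if (r->next_to_clean > r->next_to_use)
 *			return r->next_to_clean - r->next_to_use - 1;
 *		return r->bd_count + r->next_to_clean - r->next_to_use - 1;
 *	}
 */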

uint16_t
enetc_xmit_pkts(void *tx_queue,
                struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        struct enetc_swbd *tx_swbd;
        int i, start, bds_to_use;
        struct enetc_tx_bd *txbd;
        struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

        i = tx_ring->next_to_use;

        bds_to_use = enetc_bd_unused(tx_ring);
        if (bds_to_use < nb_pkts)
                nb_pkts = bds_to_use;

        start = 0;
        while (nb_pkts--) {
                tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
                txbd = ENETC_TXBD(*tx_ring, i);
                tx_swbd = &tx_ring->q_swbd[i];
                txbd->frm_len = tx_pkts[start]->pkt_len;
                txbd->buf_len = txbd->frm_len;
                txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
                txbd->addr = rte_cpu_to_le_64((uint64_t)tx_swbd->buffer_addr->buf_iova +
                                              tx_swbd->buffer_addr->data_off);
                i++;
                start++;
                if (unlikely(i == tx_ring->bd_count))
                        i = 0;
        }

        /* we're only cleaning up the Tx ring here, on the assumption that
         * software is slower than hardware and hardware completed sending
         * older frames out by now.
         * We're also cleaning up the ring before kicking off Tx for the new
         * batch to minimize chances of contention on the Tx ring.
         */
        enetc_clean_tx_ring(tx_ring);

        tx_ring->next_to_use = i;
        enetc_wr_reg(tx_ring->tcir, i);
        return start;
}
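
/*
 * Usage sketch (illustrative only, not part of the driver): once the queue
 * is configured, applications reach enetc_xmit_pkts() through the generic
 * ethdev burst API. 'port_id', 'queue_id' and 'burst' are hypothetical
 * names chosen for this example.
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t n = 32;	// filled in by the application
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, burst, n);
 *	// 'sent' may be < n when the ring lacks free BDs; the caller
 *	// retries or frees the unsent mbufs.
 */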

int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
        struct enetc_swbd *rx_swbd;
        union enetc_rx_bd *rxbd;
        int i, j, k = ENETC_RXBD_BUNDLE;
        struct rte_mbuf *m[ENETC_RXBD_BUNDLE];
        struct rte_mempool *mb_pool;

        i = rx_ring->next_to_use;
        mb_pool = rx_ring->mb_pool;
        rx_swbd = &rx_ring->q_swbd[i];
        rxbd = ENETC_RXBD(*rx_ring, i);
        for (j = 0; j < buff_cnt; j++) {
                /* bulk alloc for up to the next ENETC_RXBD_BUNDLE (16) BDs;
                 * k starts at the bundle size so the first iteration
                 * triggers an allocation.
                 */
                if (k == ENETC_RXBD_BUNDLE) {
                        k = 0;
                        int m_cnt = RTE_MIN(buff_cnt - j, ENETC_RXBD_BUNDLE);

                        if (rte_pktmbuf_alloc_bulk(mb_pool, m, m_cnt))
                                return -1;
                }

                rx_swbd->buffer_addr = m[k];
                rxbd->w.addr = (uint64_t)rx_swbd->buffer_addr->buf_iova +
                               rx_swbd->buffer_addr->data_off;
                /* clear 'R' as well */
                rxbd->r.lstatus = 0;
                rx_swbd++;
                rxbd++;
                i++;
                k++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        rxbd = ENETC_RXBD(*rx_ring, 0);
                        rx_swbd = &rx_ring->q_swbd[i];
                }
        }

        if (likely(j)) {
                rx_ring->next_to_alloc = i;
                rx_ring->next_to_use = i;
                enetc_wr_reg(rx_ring->rcir, i);
        }

        return j;
}
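
/*
 * Note on the refill loop above: rte_pktmbuf_alloc_bulk() is all-or-nothing,
 * returning 0 with all 'm_cnt' mbufs filled in or nonzero with none
 * allocated, so the early 'return -1' cannot leak a partial bundle. A worked
 * example of the chunking (numbers are illustrative): refilling
 * buff_cnt = 20 BDs does one bulk alloc of 16 at j == 0, then one of
 * RTE_MIN(20 - 16, 16) = 4 at j == 16.
 */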

static inline void enetc_slow_parsing(struct rte_mbuf *m,
                                      uint64_t parse_results)
{
        m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

        switch (parse_results) {
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4;
                m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6;
                m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_TCP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_TCP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_UDP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_UDP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_SCTP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_SCTP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_ICMP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_ICMP;
                m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
                               PKT_RX_L4_CKSUM_BAD;
                return;
        /* More switch cases can be added */
        default:
                m->packet_type = RTE_PTYPE_UNKNOWN;
                m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
                               PKT_RX_L4_CKSUM_UNKNOWN;
        }
}

static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
        ENETC_PMD_DP_DEBUG("parse summary = 0x%x   ", parse_results);
        m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

        switch (parse_results) {
        case ENETC_PKT_TYPE_ETHER:
                m->packet_type = RTE_PTYPE_L2_ETHER;
                return;
        case ENETC_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4;
                return;
        case ENETC_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6;
                return;
        case ENETC_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_TCP;
                return;
        case ENETC_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_TCP;
                return;
        case ENETC_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_UDP;
                return;
        case ENETC_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_UDP;
                return;
        case ENETC_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_SCTP;
                return;
        case ENETC_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_SCTP;
                return;
        case ENETC_PKT_TYPE_IPV4_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV4 |
                                 RTE_PTYPE_L4_ICMP;
                return;
        case ENETC_PKT_TYPE_IPV6_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                                 RTE_PTYPE_L3_IPV6 |
                                 RTE_PTYPE_L4_ICMP;
                return;
        /* More switch cases can be added */
        default:
                enetc_slow_parsing(m, parse_results);
        }
}

static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                    struct rte_mbuf **rx_pkts,
                    int work_limit)
{
        int rx_frm_cnt = 0;
        int cleaned_cnt, i;
        struct enetc_swbd *rx_swbd;

        cleaned_cnt = enetc_bd_unused(rx_ring);
        /* next descriptor to process */
        i = rx_ring->next_to_clean;
        rx_swbd = &rx_ring->q_swbd[i];
        while (likely(rx_frm_cnt < work_limit)) {
                union enetc_rx_bd *rxbd;
                uint32_t bd_status;

                rxbd = ENETC_RXBD(*rx_ring, i);
                bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
                if (!bd_status)
                        break;

                rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
                                                rx_ring->crc_len;
                rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
                                                 rx_ring->crc_len;
                rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
                rx_swbd->buffer_addr->ol_flags = 0;
                enetc_dev_rx_parse(rx_swbd->buffer_addr,
                                   rxbd->r.parse_summary);
                rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
                cleaned_cnt++;
                rx_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        rx_swbd = &rx_ring->q_swbd[i];
                }

                rx_ring->next_to_clean = i;
                rx_frm_cnt++;
        }

        enetc_refill_rx_ring(rx_ring, cleaned_cnt);

        return rx_frm_cnt;
}
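
/*
 * Refill sizing above: cleaned_cnt starts as the number of BDs already free
 * when the poll begins and grows by one per received frame, so a single
 * enetc_refill_rx_ring() call tops the ring back up. Illustrative numbers:
 * on a 512-BD ring with 3 BDs free at entry and 29 frames received, the
 * refill is asked for 3 + 29 = 32 fresh mbufs.
 */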

uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

        return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}
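
/*
 * Usage sketch (illustrative only, not part of the driver): a typical
 * poll loop pairing the Rx and Tx burst paths above. 'port_id' and
 * 'queue_id' are hypothetical names for a configured ethdev port/queue.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, nb_tx;
 *
 *	for (;;) {
 *		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
 *					 RTE_DIM(pkts));
 *		if (nb_rx == 0)
 *			continue;
 *		// ... examine or rewrite the packets, then send them on ...
 *		nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
 *		while (nb_tx < nb_rx)
 *			rte_pktmbuf_free(pkts[nb_tx++]);
 *	}
 */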