net/enetc: enable Rx checksum offload validation
[dpdk.git] / drivers / net / enetc / enetc_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <stdbool.h>
6 #include <stdint.h>
7 #include <unistd.h>
8
9 #include "rte_ethdev.h"
10 #include "rte_malloc.h"
11 #include "rte_memzone.h"
12
13 #include "base/enetc_hw.h"
14 #include "enetc.h"
15 #include "enetc_logs.h"
16
17 #define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */
18
19 static int
20 enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
21 {
22         int tx_frm_cnt = 0;
23         struct enetc_swbd *tx_swbd;
24         int i;
25
26         i = tx_ring->next_to_clean;
27         tx_swbd = &tx_ring->q_swbd[i];
28         while ((int)(enetc_rd_reg(tx_ring->tcisr) &
29                ENETC_TBCISR_IDX_MASK) != i) {
30                 rte_pktmbuf_free(tx_swbd->buffer_addr);
31                 tx_swbd->buffer_addr = NULL;
32                 tx_swbd++;
33                 i++;
34                 if (unlikely(i == tx_ring->bd_count)) {
35                         i = 0;
36                         tx_swbd = &tx_ring->q_swbd[0];
37                 }
38
39                 tx_frm_cnt++;
40         }
41
42         tx_ring->next_to_clean = i;
43         return tx_frm_cnt++;
44 }
45
46 uint16_t
47 enetc_xmit_pkts(void *tx_queue,
48                 struct rte_mbuf **tx_pkts,
49                 uint16_t nb_pkts)
50 {
51         struct enetc_swbd *tx_swbd;
52         int i, start;
53         struct enetc_tx_bd *txbd;
54         struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;
55
56         i = tx_ring->next_to_use;
57         start = 0;
58         while (nb_pkts--) {
59                 enetc_clean_tx_ring(tx_ring);
60                 tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
61                 txbd = ENETC_TXBD(*tx_ring, i);
62                 tx_swbd = &tx_ring->q_swbd[i];
63                 txbd->frm_len = tx_pkts[start]->pkt_len;
64                 txbd->buf_len = txbd->frm_len;
65                 txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
66                 txbd->addr = (uint64_t)(uintptr_t)
67                 rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
68                                  tx_swbd->buffer_addr->data_off);
69                 i++;
70                 start++;
71                 if (unlikely(i == tx_ring->bd_count))
72                         i = 0;
73         }
74
75         tx_ring->next_to_use = i;
76         enetc_wr_reg(tx_ring->tcir, i);
77         return start;
78 }
79
80 int
81 enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
82 {
83         struct enetc_swbd *rx_swbd;
84         union enetc_rx_bd *rxbd;
85         int i, j;
86
87         i = rx_ring->next_to_use;
88         rx_swbd = &rx_ring->q_swbd[i];
89         rxbd = ENETC_RXBD(*rx_ring, i);
90         for (j = 0; j < buff_cnt; j++) {
91                 rx_swbd->buffer_addr = (void *)(uintptr_t)
92                         rte_cpu_to_le_64((uint64_t)(uintptr_t)
93                                         rte_pktmbuf_alloc(rx_ring->mb_pool));
94                 rxbd->w.addr = (uint64_t)(uintptr_t)
95                                rx_swbd->buffer_addr->buf_iova +
96                                rx_swbd->buffer_addr->data_off;
97                 /* clear 'R" as well */
98                 rxbd->r.lstatus = 0;
99                 rx_swbd++;
100                 rxbd++;
101                 i++;
102                 if (unlikely(i == rx_ring->bd_count)) {
103                         i = 0;
104                         rxbd = ENETC_RXBD(*rx_ring, 0);
105                         rx_swbd = &rx_ring->q_swbd[i];
106                 }
107         }
108
109         if (likely(j)) {
110                 rx_ring->next_to_alloc = i;
111                 rx_ring->next_to_use = i;
112                 enetc_wr_reg(rx_ring->rcir, i);
113         }
114
115         return j;
116 }
117
118 static inline void enetc_slow_parsing(struct rte_mbuf *m,
119                                      uint64_t parse_results)
120 {
121         m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
122
123         switch (parse_results) {
124         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
125                 m->packet_type = RTE_PTYPE_L2_ETHER |
126                                  RTE_PTYPE_L3_IPV4;
127                 m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
128                 return;
129         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
130                 m->packet_type = RTE_PTYPE_L2_ETHER |
131                                  RTE_PTYPE_L3_IPV6;
132                 m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
133                 return;
134         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
135                 m->packet_type = RTE_PTYPE_L2_ETHER |
136                                  RTE_PTYPE_L3_IPV4 |
137                                  RTE_PTYPE_L4_TCP;
138                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
139                                PKT_RX_L4_CKSUM_BAD;
140                 return;
141         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
142                 m->packet_type = RTE_PTYPE_L2_ETHER |
143                                  RTE_PTYPE_L3_IPV6 |
144                                  RTE_PTYPE_L4_TCP;
145                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
146                                PKT_RX_L4_CKSUM_BAD;
147                 return;
148         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
149                 m->packet_type = RTE_PTYPE_L2_ETHER |
150                                  RTE_PTYPE_L3_IPV4 |
151                                  RTE_PTYPE_L4_UDP;
152                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
153                                PKT_RX_L4_CKSUM_BAD;
154                 return;
155         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
156                 m->packet_type = RTE_PTYPE_L2_ETHER |
157                                  RTE_PTYPE_L3_IPV6 |
158                                  RTE_PTYPE_L4_UDP;
159                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
160                                PKT_RX_L4_CKSUM_BAD;
161                 return;
162         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
163                 m->packet_type = RTE_PTYPE_L2_ETHER |
164                                  RTE_PTYPE_L3_IPV4 |
165                                  RTE_PTYPE_L4_SCTP;
166                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
167                                PKT_RX_L4_CKSUM_BAD;
168                 return;
169         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
170                 m->packet_type = RTE_PTYPE_L2_ETHER |
171                                  RTE_PTYPE_L3_IPV6 |
172                                  RTE_PTYPE_L4_SCTP;
173                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
174                                PKT_RX_L4_CKSUM_BAD;
175                 return;
176         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
177                 m->packet_type = RTE_PTYPE_L2_ETHER |
178                                  RTE_PTYPE_L3_IPV4 |
179                                  RTE_PTYPE_L4_ICMP;
180                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
181                                PKT_RX_L4_CKSUM_BAD;
182                 return;
183         case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
184                 m->packet_type = RTE_PTYPE_L2_ETHER |
185                                  RTE_PTYPE_L3_IPV6 |
186                                  RTE_PTYPE_L4_ICMP;
187                 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
188                                PKT_RX_L4_CKSUM_BAD;
189                 return;
190         /* More switch cases can be added */
191         default:
192                 m->packet_type = RTE_PTYPE_UNKNOWN;
193                 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
194                                PKT_RX_L4_CKSUM_UNKNOWN;
195         }
196 }
197
198
199 static inline void __attribute__((hot))
200 enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
201 {
202         ENETC_PMD_DP_DEBUG("parse summary = 0x%x   ", parse_results);
203         m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;
204
205         switch (parse_results) {
206         case ENETC_PKT_TYPE_ETHER:
207                 m->packet_type = RTE_PTYPE_L2_ETHER;
208                 return;
209         case ENETC_PKT_TYPE_IPV4:
210                 m->packet_type = RTE_PTYPE_L2_ETHER |
211                                  RTE_PTYPE_L3_IPV4;
212                 return;
213         case ENETC_PKT_TYPE_IPV6:
214                 m->packet_type = RTE_PTYPE_L2_ETHER |
215                                  RTE_PTYPE_L3_IPV6;
216                 return;
217         case ENETC_PKT_TYPE_IPV4_TCP:
218                 m->packet_type = RTE_PTYPE_L2_ETHER |
219                                  RTE_PTYPE_L3_IPV4 |
220                                  RTE_PTYPE_L4_TCP;
221                 return;
222         case ENETC_PKT_TYPE_IPV6_TCP:
223                 m->packet_type = RTE_PTYPE_L2_ETHER |
224                                  RTE_PTYPE_L3_IPV6 |
225                                  RTE_PTYPE_L4_TCP;
226                 return;
227         case ENETC_PKT_TYPE_IPV4_UDP:
228                 m->packet_type = RTE_PTYPE_L2_ETHER |
229                                  RTE_PTYPE_L3_IPV4 |
230                                  RTE_PTYPE_L4_UDP;
231                 return;
232         case ENETC_PKT_TYPE_IPV6_UDP:
233                 m->packet_type = RTE_PTYPE_L2_ETHER |
234                                  RTE_PTYPE_L3_IPV6 |
235                                  RTE_PTYPE_L4_UDP;
236                 return;
237         case ENETC_PKT_TYPE_IPV4_SCTP:
238                 m->packet_type = RTE_PTYPE_L2_ETHER |
239                                  RTE_PTYPE_L3_IPV4 |
240                                  RTE_PTYPE_L4_SCTP;
241                 return;
242         case ENETC_PKT_TYPE_IPV6_SCTP:
243                 m->packet_type = RTE_PTYPE_L2_ETHER |
244                                  RTE_PTYPE_L3_IPV6 |
245                                  RTE_PTYPE_L4_SCTP;
246                 return;
247         case ENETC_PKT_TYPE_IPV4_ICMP:
248                 m->packet_type = RTE_PTYPE_L2_ETHER |
249                                  RTE_PTYPE_L3_IPV4 |
250                                  RTE_PTYPE_L4_ICMP;
251                 return;
252         case ENETC_PKT_TYPE_IPV6_ICMP:
253                 m->packet_type = RTE_PTYPE_L2_ETHER |
254                                  RTE_PTYPE_L3_IPV6 |
255                                  RTE_PTYPE_L4_ICMP;
256                 return;
257         /* More switch cases can be added */
258         default:
259                 enetc_slow_parsing(m, parse_results);
260         }
261
262 }
263
/* Harvest up to work_limit received frames from the Rx ring into rx_pkts.
 *
 * The loop opportunistically refills the ring in ENETC_RXBD_BUNDLE-sized
 * batches BEFORE polling the next BD, so hardware rarely starves while
 * software drains. Returns the number of frames delivered.
 */
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                    struct rte_mbuf **rx_pkts,
                    int work_limit)
{
        int rx_frm_cnt = 0;
        int cleaned_cnt, i;
        struct enetc_swbd *rx_swbd;

        /* Number of BDs already consumed and eligible for refill */
        cleaned_cnt = enetc_bd_unused(rx_ring);
        /* next descriptor to process */
        i = rx_ring->next_to_clean;
        rx_swbd = &rx_ring->q_swbd[i];
        while (likely(rx_frm_cnt < work_limit)) {
                union enetc_rx_bd *rxbd;
                uint32_t bd_status;

                /* Batch refills to amortize the RCIR doorbell write */
                if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
                        int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

                        cleaned_cnt -= count;
                }

                rxbd = ENETC_RXBD(*rx_ring, i);
                bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
                /* lstatus == 0 means hardware has not completed this BD yet
                 * (refill cleared it); stop draining here.
                 */
                if (!bd_status)
                        break;

                /* Single-BD frames: pkt_len == data_len, minus stripped CRC */
                rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
                                                rx_ring->crc_len;
                rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
                                                 rx_ring->crc_len;
                rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
                rx_swbd->buffer_addr->ol_flags = 0;
                /* Translate the HW parse summary into ptype + csum flags */
                enetc_dev_rx_parse(rx_swbd->buffer_addr,
                                   rxbd->r.parse_summary);
                rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
                cleaned_cnt++;
                rx_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        rx_swbd = &rx_ring->q_swbd[i];
                }

                /* Publish progress each iteration so refill sees fresh state */
                rx_ring->next_to_clean = i;
                rx_frm_cnt++;
        }

        return rx_frm_cnt;
}
315
316 uint16_t
317 enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
318                 uint16_t nb_pkts)
319 {
320         struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
321
322         return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
323 }