net: add macro for VLAN header length
[dpdk.git] / drivers / net / enetfec / enet_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <rte_mbuf.h>
#include <rte_io.h>
#include <ethdev_driver.h>
#include "enet_regs.h"
#include "enet_ethdev.h"
#include "enet_pmd_logs.h"

/* Rx queue processing: dequeue received packets from the Rx ring.
 * As each descriptor is handed back to the hardware, only its empty
 * indicator needs to be set.
 */
uint16_t
enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        struct rte_mempool *pool;
        struct bufdesc *bdp;
        struct rte_mbuf *mbuf, *new_mbuf = NULL;
        unsigned short status;
        unsigned short pkt_len;
        int pkt_received = 0, index = 0;
        void *data;
        uint16_t vlan_tag;
        struct bufdesc_ex *ebdp = NULL;
        bool vlan_packet_rcvd = false;
        struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
        struct rte_eth_stats *stats = &rxq->fep->stats;
        struct rte_eth_conf *eth_conf = &rxq->fep->dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;

        pool = rxq->pool;
        bdp = rxq->bd.cur;

        /* Process the incoming packet */
        status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
        while ((status & RX_BD_EMPTY) == 0) {
                if (pkt_received >= nb_pkts)
                        break;

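                /* Allocate the replacement buffer up front so the
                 * descriptor can be refilled as soon as the received
                 * mbuf is handed to the application.
                 */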
                new_mbuf = rte_pktmbuf_alloc(pool);
                if (unlikely(new_mbuf == NULL)) {
                        stats->rx_nombuf++;
                        break;
                }
                /* Check for errors. RX_BD_LAST is inverted so that a
                 * frame arriving without the LAST bit set is caught by
                 * the error mask below.
                 */
                status ^= RX_BD_LAST;
                if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
                        RX_BD_CR | RX_BD_OV | RX_BD_LAST |
                        RX_BD_TR)) {
                        stats->ierrors++;
                        /* On error the old buffer stays in the ring, so
                         * the replacement mbuf is not needed; free it to
                         * avoid leaking it.
                         */
                        rte_pktmbuf_free(new_mbuf);
                        if (status & RX_BD_OV) {
                                /* FIFO overrun */
                                ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
                                goto rx_processing_done;
                        }
                        if (status & (RX_BD_LG | RX_BD_SH
                                                | RX_BD_LAST)) {
                                /* Frame too long or too short. */
                                ENETFEC_DP_LOG(DEBUG, "rx_length_error");
                                if (status & RX_BD_LAST)
                                        ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
                        }
                        /* CRC error */
                        if (status & RX_BD_CR)
                                ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
                        /* Report late collisions as a frame error. */
                        if (status & (RX_BD_NO | RX_BD_TR))
                                ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                stats->ipackets++;
                pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
                stats->ibytes += pkt_len;

                /* Look up the mbuf currently attached to this descriptor. */
                index = enet_get_bd_index(bdp, &rxq->bd);
                mbuf = rxq->rx_mbuf[index];
                data = rte_pktmbuf_mtod(mbuf, uint8_t *);
                rte_prefetch0(data);
                /* bd_datlen includes the 4-byte Ethernet FCS; do not
                 * pass it up with the frame.
                 */
                rte_pktmbuf_append(mbuf, pkt_len - 4);

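                /* Controllers with the receive accelerator (RACC)
                 * prepend two bytes of padding so the IP header is
                 * 32-bit aligned; strip that padding here.
                 */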
                if (rxq->fep->quirks & QUIRK_RACC)
                        data = rte_pktmbuf_adj(mbuf, 2);

                rx_pkts[pkt_received] = mbuf;
                pkt_received++;

                /* Extract the enhanced buffer descriptor */
                ebdp = NULL;
                if (rxq->fep->bufdesc_ex)
                        ebdp = (struct bufdesc_ex *)bdp;

                /* If this is a VLAN packet remove the VLAN tag */
                vlan_packet_rcvd = false;
                if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN) &&
                                rxq->fep->bufdesc_ex &&
                                (rte_read32(&ebdp->bd_esc) &
                                rte_cpu_to_le_32(BD_ENETFEC_RX_VLAN))) {
                        /* Save the tag, then strip the VLAN header by
                         * moving the MAC addresses forward over it.
                         */
                        struct rte_vlan_hdr *vlan_header =
                                (struct rte_vlan_hdr *)
                                ((uint8_t *)data + ETH_HLEN);
                        vlan_tag = rte_be_to_cpu_16(vlan_header->vlan_tci);

                        vlan_packet_rcvd = true;
                        memmove((uint8_t *)data + RTE_VLAN_HLEN,
                                data, RTE_ETHER_ADDR_LEN * 2);
                        rte_pktmbuf_adj(mbuf, RTE_VLAN_HLEN);
                }

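                /* With Rx checksum offload enabled, the extended
                 * descriptor reports whether the hardware checksum
                 * validation failed.
                 */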
                if (rxq->fep->bufdesc_ex &&
                        (rxq->fep->flag_csum & RX_FLAG_CSUM_EN)) {
                        if ((rte_read32(&ebdp->bd_esc) &
                                rte_cpu_to_le_32(RX_FLAG_CSUM_ERR)) == 0)
                                /* Validated by hardware; no need to re-check */
                                mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        else
                                mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_BAD;
                }

                /* Handle received VLAN packets */
                if (vlan_packet_rcvd) {
                        mbuf->vlan_tci = vlan_tag;
                        mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED
                                                | RTE_MBUF_F_RX_VLAN;
                }

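                /* Swap the delivered mbuf out of the ring and point the
                 * descriptor at the replacement buffer.
                 */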
                rxq->rx_mbuf[index] = new_mbuf;
                rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
                                &bdp->bd_bufaddr);
rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~RX_BD_STATS;

                /* Mark the buffer empty */
                status |= RX_BD_EMPTY;

                if (rxq->fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        rte_write32(rte_cpu_to_le_32(RX_BD_INT),
                                    &ebdp->bd_esc);
                        rte_write32(0, &ebdp->bd_prot);
                        rte_write32(0, &ebdp->bd_bdu);
                }

                /* Make sure the updates to the rest of the descriptor are
                 * performed before transferring ownership.
                 */
                rte_wmb();
                rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);

                /* Update BD pointer to next entry */
                bdp = enet_get_nextdesc(bdp, &rxq->bd);

                /* Doing this here will keep the FEC running while we
                 * process incoming frames.
                 */
                rte_write32(0, rxq->bd.active_reg_desc);
                status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
        }
        rxq->bd.cur = bdp;
        return pkt_received;
}

uint16_t
enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct enetfec_priv_tx_q *txq =
                        (struct enetfec_priv_tx_q *)tx_queue;
        struct rte_eth_stats *stats = &txq->fep->stats;
        struct bufdesc *bdp, *last_bdp;
        struct rte_mbuf *mbuf;
        unsigned short status;
        unsigned short buflen;
        unsigned int index, estatus = 0;
        unsigned int i, pkt_transmitted = 0;
        uint8_t *data;

        while (pkt_transmitted < nb_pkts) {
                bdp = txq->bd.cur;
                /* First clean the ring */
                index = enet_get_bd_index(bdp, &txq->bd);
                status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));

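                /* Descriptor still owned by the hardware: the Tx ring
                 * is full, so stop enqueuing here.
                 */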
                if (status & TX_BD_READY) {
                        stats->oerrors++;
                        break;
                }

                /* Free the mbuf left over from this descriptor's
                 * previous, already-completed transmission.
                 */
                if (txq->tx_mbuf[index]) {
                        rte_pktmbuf_free(txq->tx_mbuf[index]);
                        txq->tx_mbuf[index] = NULL;
                }

                mbuf = *(tx_pkts++);

                /* Fill in a Tx ring entry */
                last_bdp = bdp;
                status &= ~TX_BD_STATS;

                /* Set buffer length and buffer pointer */
                buflen = rte_pktmbuf_pkt_len(mbuf);

                /* Only linear mbufs can be sent: scatter-gather is not
                 * supported, so stop and report what was enqueued so far.
                 */
                if (mbuf->nb_segs > 1) {
                        ENETFEC_DP_LOG(DEBUG, "SG not supported");
                        return pkt_transmitted;
                }

                stats->opackets++;
                stats->obytes += buflen;

                status |= TX_BD_LAST;
                data = rte_pktmbuf_mtod(mbuf, void *);
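                /* Flush the payload from the CPU cache so the DMA
                 * engine reads coherent data; the inclusive bound also
                 * covers a buffer that is not cache-line aligned.
                 */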
                for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
                        dcbf(data + i);

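                /* Point the descriptor at the frame data and record
                 * its length.
                 */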
                rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
                            &bdp->bd_bufaddr);
                rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);

                if (txq->fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

                        /* Reset the extended status for each packet and
                         * request hardware checksum insertion when the
                         * application asked for Tx checksum offload.
                         */
                        estatus = 0;
                        if (mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM |
                                        RTE_MBUF_F_TX_TCP_CKSUM |
                                        RTE_MBUF_F_TX_UDP_CKSUM))
                                estatus |= TX_BD_PINS | TX_BD_IINS;

                        rte_write32(0, &ebdp->bd_bdu);
                        rte_write32(rte_cpu_to_le_32(estatus),
                                    &ebdp->bd_esc);
                }

                index = enet_get_bd_index(last_bdp, &txq->bd);
                /* Save mbuf pointer */
                txq->tx_mbuf[index] = mbuf;

                /* Make sure the updates to the rest of the descriptor
                 * are performed before transferring ownership.
                 */
                status |= (TX_BD_READY | TX_BD_TC);
                rte_wmb();
                rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);

                /* Trigger transmission start */
                rte_write32(0, txq->bd.active_reg_desc);
                pkt_transmitted++;

                /* If this was the last BD in the ring, start at the
                 * beginning again.
                 */
                bdp = enet_get_nextdesc(last_bdp, &txq->bd);

                /* Make sure the updates to bdp and tx_mbuf are
                 * performed before txq->bd.cur is updated.
                 */
                txq->bd.cur = bdp;
        }
        return pkt_transmitted;
}