1 /* SPDX-License-Identifier: BSD-3-Clause
7 #include <ethdev_driver.h>
9 #include "enet_ethdev.h"
10 #include "enet_pmd_logs.h"
12 /* This function does enetfec_rx_queue processing. Dequeue packet from Rx queue
13 * When update through the ring, just set the empty indicator.
/*
 * NOTE(review): this chunk is an elided view of the file (leading numbers
 * are the original line numbers; several lines are missing), so only
 * comments are added below — code bytes are untouched.
 *
 * enetfec_recv_pkts() - burst receive handler for one Rx queue.
 * Walks the Rx buffer-descriptor (BD) ring while descriptors are owned by
 * software (RX_BD_EMPTY clear): validates each BD's status, hands the
 * filled mbuf to rx_pkts[], refills the ring slot with a freshly
 * allocated mbuf, clears the BD status and sets RX_BD_EMPTY to return
 * ownership to hardware. Presumably returns pkt_received — the return
 * statement is elided from this view; confirm against the full file.
 */
16 enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
19 struct rte_mempool *pool;
21 struct rte_mbuf *mbuf, *new_mbuf = NULL;
22 unsigned short status;
23 unsigned short pkt_len;
24 int pkt_received = 0, index = 0;
25 void *data, *mbuf_data;
27 struct bufdesc_ex *ebdp = NULL;
28 bool vlan_packet_rcvd = false;
29 struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
30 struct rte_eth_stats *stats = &rxq->fep->stats;
31 struct rte_eth_conf *eth_conf = &rxq->fep->dev->data->dev_conf;
32 uint64_t rx_offloads = eth_conf->rxmode.offloads;
36 /* Process the incoming packet */
37 status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
/* Loop while the descriptor is owned by software (EMPTY bit clear). */
38 while ((status & RX_BD_EMPTY) == 0) {
/* Stop once the caller's burst is full (the break is elided here). */
39 if (pkt_received >= nb_pkts)
/* Allocate the replacement mbuf BEFORE consuming the current one, so
 * the ring slot is never left without a buffer on allocation failure.
 */
42 new_mbuf = rte_pktmbuf_alloc(pool);
43 if (unlikely(new_mbuf == NULL)) {
47 /* Check for errors. */
49 if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
50 RX_BD_CR | RX_BD_OV | RX_BD_LAST |
/* FIFO overrun: frame is unusable, recycle the descriptor. */
53 if (status & RX_BD_OV) {
55 /* enet_dump_rx(rxq); */
56 ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
57 goto rx_processing_done;
59 if (status & (RX_BD_LG | RX_BD_SH
61 /* Frame too long or too short. */
62 ENETFEC_DP_LOG(DEBUG, "rx_length_error");
63 if (status & RX_BD_LAST)
64 ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
66 if (status & RX_BD_CR) { /* CRC Error */
67 ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
69 /* Report late collisions as a frame error. */
70 if (status & (RX_BD_NO | RX_BD_TR))
71 ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
/* Errored frame: drop it but still recycle the BD back to hardware. */
72 goto rx_processing_done;
75 /* Process the incoming frame. */
77 pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
78 stats->ibytes += pkt_len;
80 /* shows data with respect to the data_off field. */
81 index = enet_get_bd_index(bdp, &rxq->bd);
82 mbuf = rxq->rx_mbuf[index];
84 data = rte_pktmbuf_mtod(mbuf, uint8_t *);
87 rte_pktmbuf_append((struct rte_mbuf *)mbuf,
/* RACC quirk: hardware prepends 2 bytes of padding so the IP header is
 * 4-byte aligned; strip them before handing the mbuf up.
 */
90 if (rxq->fep->quirks & QUIRK_RACC)
91 data = rte_pktmbuf_adj(mbuf, 2);
93 rx_pkts[pkt_received] = mbuf;
96 /* Extract the enhanced buffer descriptor */
98 if (rxq->fep->bufdesc_ex)
99 ebdp = (struct bufdesc_ex *)bdp;
101 /* If this is a VLAN packet remove the VLAN Tag */
102 vlan_packet_rcvd = false;
/* && short-circuit guarantees ebdp is only dereferenced when
 * bufdesc_ex is set (i.e. ebdp was assigned above).
 */
103 if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN) &&
104 rxq->fep->bufdesc_ex &&
105 (rte_read32(&ebdp->bd_esc) &
106 rte_cpu_to_le_32(BD_ENETFEC_RX_VLAN))) {
107 /* Push and remove the vlan tag */
108 struct rte_vlan_hdr *vlan_header =
109 (struct rte_vlan_hdr *)
110 ((uint8_t *)data + ETH_HLEN);
111 vlan_tag = rte_be_to_cpu_16(vlan_header->vlan_tci);
113 vlan_packet_rcvd = true;
/* Shift the two MAC addresses forward over the 4-byte VLAN header,
 * then trim the header from the front of the mbuf.
 * NOTE(review): mbuf_data's assignment is elided from this view —
 * confirm it points at the post-shift headroom in the full file.
 */
114 memmove((uint8_t *)mbuf_data + RTE_VLAN_HLEN,
115 data, RTE_ETHER_ADDR_LEN * 2);
116 rte_pktmbuf_adj(mbuf, RTE_VLAN_HLEN);
119 if (rxq->fep->bufdesc_ex &&
120 (rxq->fep->flag_csum & RX_FLAG_CSUM_EN)) {
/* NOTE(review): "checksum-error bit CLEAR => CKSUM_BAD" reads as
 * inverted relative to the flag name; a line between 122 and 124 is
 * elided here, so verify the branch pairing against the full file
 * and the ENET block's descriptor documentation before relying on
 * these offload flags.
 */
121 if ((rte_read32(&ebdp->bd_esc) &
122 rte_cpu_to_le_32(RX_FLAG_CSUM_ERR)) == 0) {
124 mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_BAD;
126 mbuf->ol_flags = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
130 /* Handle received VLAN packets */
131 if (vlan_packet_rcvd) {
132 mbuf->vlan_tci = vlan_tag;
133 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED
134 | RTE_MBUF_F_RX_VLAN;
/* Install the replacement mbuf and program its IOVA into the BD. */
137 rxq->rx_mbuf[index] = new_mbuf;
138 rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
141 /* when rx_processing_done clear the status flags
144 status &= ~RX_BD_STATS;
146 /* Mark the buffer empty */
147 status |= RX_BD_EMPTY;
149 if (rxq->fep->bufdesc_ex) {
150 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
151 rte_write32(rte_cpu_to_le_32(RX_BD_INT),
153 rte_write32(0, &ebdp->bd_prot);
154 rte_write32(0, &ebdp->bd_bdu);
157 /* Make sure the updates to rest of the descriptor are
158 * performed before transferring ownership.
/* Writing bd_sc with EMPTY set hands the descriptor back to hardware;
 * it must be the last descriptor field written (see barrier note above).
 */
161 rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
163 /* Update BD pointer to next entry */
164 bdp = enet_get_nextdesc(bdp, &rxq->bd);
166 /* Doing this here will keep the FEC running while we process
/* Kick the RDAR register so the MAC resumes descriptor polling. */
169 rte_write32(0, rxq->bd.active_reg_desc);
170 status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
/*
 * NOTE(review): elided view of the file (leading numbers are original line
 * numbers; several lines — including the function tail — are missing), so
 * only comments are added; code bytes are untouched.
 *
 * enetfec_xmit_pkts() - burst transmit handler for one Tx queue.
 * For each mbuf: reclaims the ring slot if a previously sent mbuf still
 * occupies it, fills the buffer descriptor (address, length, flags),
 * flushes the payload from cache, sets TX_BD_READY to pass ownership to
 * hardware, and kicks the transmit-active register. Multi-segment mbufs
 * are not supported (logged and presumably skipped — the branch body is
 * elided here).
 */
177 enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
179 struct enetfec_priv_tx_q *txq =
180 (struct enetfec_priv_tx_q *)tx_queue;
181 struct rte_eth_stats *stats = &txq->fep->stats;
182 struct bufdesc *bdp, *last_bdp;
183 struct rte_mbuf *mbuf;
184 unsigned short status;
185 unsigned short buflen;
186 unsigned int index, estatus = 0;
187 unsigned int i, pkt_transmitted = 0;
/* Stop once the caller's burst has been consumed. */
192 if (pkt_transmitted >= nb_pkts) {
197 /* First clean the ring */
198 index = enet_get_bd_index(bdp, &txq->bd);
199 status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
/* READY still set => hardware owns this BD; ring is full (handling of
 * this case is elided from this view).
 */
201 if (status & TX_BD_READY) {
/* Free the mbuf from the previous transmission of this slot. */
205 if (txq->tx_mbuf[index]) {
206 rte_pktmbuf_free(txq->tx_mbuf[index]);
207 txq->tx_mbuf[index] = NULL;
213 /* Fill in a Tx ring entry */
215 status &= ~TX_BD_STATS;
217 /* Set buffer length and buffer pointer */
218 buflen = rte_pktmbuf_pkt_len(mbuf);
220 stats->obytes += buflen;
/* Scatter-gather is not supported by this driver: log and bail out of
 * this packet (the skip/continue is elided here).
 */
222 if (mbuf->nb_segs > 1) {
223 ENETFEC_DP_LOG(DEBUG, "SG not supported");
226 status |= (TX_BD_LAST);
227 data = rte_pktmbuf_mtod(mbuf, void *);
/* Flush the payload to memory cache-line by cache-line so the DMA engine
 * sees current data (the flush call itself is elided from this view).
 */
228 for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
231 rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
233 rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
235 if (txq->fep->bufdesc_ex) {
236 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
/* NOTE(review): exact-equality compare of ol_flags against an RX
 * checksum flag in the Tx path looks wrong twice over (should
 * presumably test TX offload request bits with '&'); verify intent
 * against the full file and mbuf offload-flag documentation.
 */
238 if (mbuf->ol_flags == RTE_MBUF_F_RX_IP_CKSUM_GOOD)
239 estatus |= TX_BD_PINS | TX_BD_IINS;
241 rte_write32(0, &ebdp->bd_bdu);
242 rte_write32(rte_cpu_to_le_32(estatus),
246 index = enet_get_bd_index(last_bdp, &txq->bd);
247 /* Save mbuf pointer */
248 txq->tx_mbuf[index] = mbuf;
250 /* Make sure the updates to rest of the descriptor are performed
251 * before transferring ownership.
/* Setting READY hands the BD to hardware; must be the last descriptor
 * field written (see barrier note above). TC requests CRC append.
 */
253 status |= (TX_BD_READY | TX_BD_TC);
255 rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
257 /* Trigger transmission start */
258 rte_write32(0, txq->bd.active_reg_desc);
261 /* If this was the last BD in the ring, start at the
264 bdp = enet_get_nextdesc(last_bdp, &txq->bd);
266 /* Make sure the update to bdp and tx_skbuff are performed
267 * before txq->bd.cur.