/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdint.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */
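
/* Free the mbufs of all Tx BDs that the hardware has reported as sent,
 * i.e. every BD between next_to_clean and the HW consumer index.
 * Returns the number of frames cleaned.
 */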
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd;
	int i, hwci;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];

	hwci = (int)(enetc_rd_reg(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);

	/* We read the CI index only once here, which means HW may update
	 * it while we do clean-up.  We could read the register in a loop,
	 * but for now it is acceptable to leave a few Tx frames for the
	 * next call.  The issue with reading the register in a loop is
	 * that we would stall here trying to catch up with HW, which keeps
	 * sending traffic as long as it has traffic to send; in effect we
	 * could be waiting for HW to drain the Tx ring instead of doing
	 * Rx in the meantime.
	 */
	while (i != hwci) {
		/* frame sent by HW, release the backing mbuf */
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}
		tx_frm_cnt++;
	}

	tx_ring->next_to_clean = i;
	return tx_frm_cnt;
}
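
/* Burst transmit. Each mbuf maps to exactly one Tx BD (no multi-segment
 * support here); frames that do not fit in the free BDs of the ring are
 * not accepted. Returns the number of frames actually queued to HW.
 */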
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	/* do not accept more frames than there are free BDs in the ring */
	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

	start = 0;
	while (nb_pkts--) {
		enetc_clean_tx_ring(tx_ring);
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		/* DMA address of the frame data within the mbuf */
		txbd->addr = rte_cpu_to_le_64(tx_swbd->buffer_addr->buf_iova +
					      tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	/* let HW know there are new BDs to process */
	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);

	return start;
}
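
/* Allocate fresh mbufs for up to buff_cnt Rx BDs, starting at next_to_use,
 * and hand the refilled BDs back to HW via the Rx ring consumer index
 * register. Returns the number of BDs actually refilled.
 */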
static int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		rx_swbd->buffer_addr = rte_pktmbuf_alloc(rx_ring->mb_pool);
		/* stop on allocation failure; BDs not refilled here stay
		 * unavailable to HW until a later refill attempt
		 */
		if (unlikely(rx_swbd->buffer_addr == NULL))
			break;
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		/* let HW know the ring has been refilled */
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}
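
/* Fallback for parse summaries that flag a parse/checksum error: set the
 * packet type as usual, but mark the offending checksum bad instead of
 * good. Kept out of the hot path on the assumption that errors are rare.
 */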
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}
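
/* Fast-path translation of the HW parse summary into rte_mbuf packet type
 * and checksum flags; anything that is not a clean, well-known type is
 * punted to enetc_slow_parsing().
 */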
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}
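
/* Receive up to work_limit frames, translating completed Rx BDs into
 * mbufs. Consumed BDs are refilled in bundles of ENETC_RXBD_BUNDLE to
 * amortize the cost of buffer allocation and the ring index update.
 */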
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			cleaned_cnt -= count;
		}

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		/* stop at the first BD that HW has not completed yet */
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}
		rx_frm_cnt++;
	}

	rx_ring->next_to_clean = i;
	return rx_frm_cnt;
}
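
/* Burst receive entry point, a thin wrapper around enetc_clean_rx_ring(). */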
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}
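
/*
 * Usage note: these burst handlers are expected to be hooked into the
 * ethdev at device init time, in the PMD's probe/setup code (e.g.
 * enetc_ethdev.c), roughly as follows:
 *
 *	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
 *	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
 *
 * The actual hook-up lives outside this file; the snippet above is a
 * sketch of the usual DPDK PMD convention, not a copy of that code.
 */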