/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdint.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 16 /* Number of buffers to allocate at once */
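
/* Reclaim completed Tx descriptors: walk the software BD ring from
 * next_to_clean up to the hardware consumer index, collecting the
 * transmitted mbufs and freeing them in bulk.
 */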
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd, *tx_swbd_base;
	int i, hwci, bd_count;
	struct rte_mbuf *m[ENETC_RXBD_BUNDLE];

	/* we don't need barriers here, we just want a relatively current value
	 * from HW.
	 */
	hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);

	tx_swbd_base = tx_ring->q_swbd;
	bd_count = tx_ring->bd_count;
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_swbd_base[i];

	/* we're only reading the CI index once here, which means HW may update
	 * it while we're doing clean-up. We could read the register in a loop,
	 * but for now I assume it's OK to leave a few Tx frames for the next
	 * call. The issue with reading the register in a loop is that we'd be
	 * stalling here trying to catch up with HW, which keeps sending
	 * traffic as long as it has traffic to send; in effect we could be
	 * waiting for HW to drain the Tx ring instead of doing Rx in the
	 * meantime.
	 */
	while (i != hwci) {
		/* Calling rte_pktmbuf_free() per mbuf wastes a lot of cycles;
		 * batch the mbufs and free them in bulk instead.
		 */
		if (tx_frm_cnt == ENETC_RXBD_BUNDLE) {
			rte_pktmbuf_free_bulk(m, tx_frm_cnt);
			tx_frm_cnt = 0;
		}

		m[tx_frm_cnt] = tx_swbd->buffer_addr;
		tx_swbd->buffer_addr = NULL;

		i++;
		tx_swbd++;
		if (unlikely(i == bd_count)) {
			i = 0;
			tx_swbd = tx_swbd_base;
		}

		tx_frm_cnt++;
	}

	if (tx_frm_cnt)
		rte_pktmbuf_free_bulk(m, tx_frm_cnt);

	tx_ring->next_to_clean = i;

	return 0;
}
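
/* Burst-mode transmit: post up to nb_pkts mbufs on the Tx BD ring,
 * then ring the doorbell once for the whole batch.  Returns the number
 * of frames actually queued.
 */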
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

	start = 0;
	while (nb_pkts--) {
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = (uint64_t)(uintptr_t)
			rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
					 tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	/* we're only cleaning up the Tx ring here, on the assumption that
	 * software is slower than hardware and hardware completed sending
	 * older frames out by now.
	 * We're also cleaning up the ring before kicking off Tx for the new
	 * batch to minimize chances of contention on the Tx ring.
	 */
	enetc_clean_tx_ring(tx_ring);

	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);

	return start;
}
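
/* Re-arm up to buff_cnt Rx descriptors with fresh mbufs, allocating
 * from the mempool in bundles of ENETC_RXBD_BUNDLE to amortize the
 * per-mbuf allocation cost.  Returns the number of BDs refilled, or
 * -1 on allocation failure.
 */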
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j, k = ENETC_RXBD_BUNDLE;
	struct rte_mbuf *m[ENETC_RXBD_BUNDLE];
	struct rte_mempool *mb_pool;

	i = rx_ring->next_to_use;
	mb_pool = rx_ring->mb_pool;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		/* bulk alloc for the next up to ENETC_RXBD_BUNDLE BDs */
		if (k == ENETC_RXBD_BUNDLE) {
			int m_cnt = RTE_MIN(buff_cnt - j, ENETC_RXBD_BUNDLE);

			if (rte_pktmbuf_alloc_bulk(mb_pool, m, m_cnt))
				return -1;

			k = 0;
		}

		rx_swbd->buffer_addr = m[k];
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		rx_swbd++;
		rxbd++;
		i++;
		k++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}
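
/* Slow-path Rx parse fallback: reached only when the parse summary
 * carries ENETC_PARSE_ERROR, so the optimistic checksum flags set by
 * the fast path are cleared first and re-derived per packet type.
 */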
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}
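
/* Fast-path Rx parse: optimistically mark both checksums good and
 * classify the common error-free packet types inline; anything else
 * falls through to enetc_slow_parsing().
 */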
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}
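
/* Harvest completed Rx descriptors: hand up to work_limit received
 * mbufs to the caller, then refill every BD consumed so the ring
 * stays fully armed.
 */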
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len - rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len - rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

	enetc_refill_rx_ring(rx_ring, cleaned_cnt);

	return rx_frm_cnt;
}
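
/* Burst-mode receive entry point registered with the ethdev layer;
 * a thin wrapper over the ring cleaner above.
 */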
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}
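
/*
 * Usage sketch (illustrative, not part of this file): the burst
 * handlers above are installed on the ethdev at probe time, along the
 * lines below; in this driver the actual hook-up lives in the device
 * init path (enetc_ethdev.c).
 *
 *	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
 *	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
 *
 * Applications then reach these handlers through the generic burst
 * API, e.g.:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */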