/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"

#define ENETC_RXBD_BUNDLE 16 /* Number of buffers to allocate at once */
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	struct enetc_swbd *tx_swbd;
	int i, hwci;
	/* we don't need barriers here, we just want a relatively current value
	 * from HW.
	 */
	hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);
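	/* hwci is now the HW consumer index: the first BD that HW has not
	 * finished sending yet. Every BD in [next_to_clean, hwci) is done and
	 * its mbuf can be freed.
	 */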
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	/* we're only reading the CI index once here, which means HW may update
	 * it while we're doing clean-up. We could read the register in a loop,
	 * but for now we assume it's OK to leave a few Tx frames for the next
	 * call. The issue with reading the register in a loop is that we'd be
	 * stalling here trying to catch up with HW, which keeps sending
	 * traffic as long as it has traffic to send; in effect we could be
	 * waiting here for the Tx ring to be drained by HW instead of doing
	 * Rx in the meantime.
	 */
	while (i != hwci) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;

		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}
	}

	tx_ring->next_to_clean = i;

	return 0;
}
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;
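	/* one BD per frame on this path; multi-segment mbufs are not handled,
	 * so each frame is assumed to fit in its first segment.
	 */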
	start = 0;
	while (nb_pkts--) {
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
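		/* F presumably marks the final (here: only) BD of the frame;
		 * the DMA address points at the start of the packet data.
		 */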
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = rte_cpu_to_le_64((uint64_t)tx_swbd->buffer_addr->buf_iova +
					      tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}
	/* we're only cleaning up the Tx ring here, on the assumption that
	 * software is slower than hardware and hardware has completed sending
	 * older frames out by now.
	 * We also clean up the ring before kicking off Tx for the new batch
	 * to minimize the chances of contention on the Tx ring.
	 */
	enetc_clean_tx_ring(tx_ring);
	tx_ring->next_to_use = i;
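	/* writing the new producer index into tcir is what hands the fresh
	 * BDs over to HW and kicks off transmission of this batch.
	 */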
	enetc_wr_reg(tx_ring->tcir, i);

	return start;
}
static int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j, k = ENETC_RXBD_BUNDLE;
	struct rte_mbuf *m[ENETC_RXBD_BUNDLE];
	struct rte_mempool *mb_pool;

	i = rx_ring->next_to_use;
	mb_pool = rx_ring->mb_pool;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
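	/* k starts out at ENETC_RXBD_BUNDLE so the first loop iteration
	 * triggers an mbuf bulk allocation right away.
	 */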
	for (j = 0; j < buff_cnt; j++) {
		/* bulk alloc for the next up to ENETC_RXBD_BUNDLE BDs */
		if (k == ENETC_RXBD_BUNDLE) {
			k = 0;
			int m_cnt = RTE_MIN(buff_cnt - j, ENETC_RXBD_BUNDLE);

			if (rte_pktmbuf_alloc_bulk(mb_pool, m, m_cnt))
				return -1;
		}
		rx_swbd->buffer_addr = m[k];
		rxbd->w.addr = (uint64_t)(uintptr_t)
			       rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well, so this BD isn't mistaken for a
		 * completed one before HW writes it back
		 */
		rxbd->r.lstatus = 0;

		rx_swbd++;
		rxbd++;
		i++;
		k++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}
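	/* only touch HW state if at least one buffer was actually posted */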
	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}
static inline void enetc_slow_parsing(struct rte_mbuf *m,
				      uint64_t parse_results)
{
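	/* enetc_dev_rx_parse() optimistically set both checksum-good flags;
	 * undo that before handling the error cases below.
	 */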
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
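	/* judging by the flags set below: for the L4 packet types a parse
	 * error means the IP header was fine but the L4 checksum was not,
	 * while for the bare IPV4/IPV6 types the IP header checksum is bad.
	 */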
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
			       PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
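	/* optimistically assume good checksums; enetc_slow_parsing() clears
	 * these flags again for the error cases.
	 */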
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
				 RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;
	cleaned_cnt = enetc_bd_unused(rx_ring);
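	/* cleaned_cnt starts out at the number of already-unused BDs so the
	 * refill at the end of this function tops the ring back up fully.
	 */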
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
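	/* walk the ring until we hit a BD that HW has not written back yet
	 * (bd_status == 0) or until work_limit frames have been delivered
	 */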
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;
		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;

		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}
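	/* hand the freed-up BDs fresh buffers and give them back to HW */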
	enetc_refill_rx_ring(rx_ring, cleaned_cnt);

	return rx_frm_cnt;
}
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}