/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#include "rte_ethdev.h"
#include "rte_malloc.h"
#include "rte_memzone.h"

#include "base/enetc_hw.h"
#include "enetc.h"
#include "enetc_logs.h"
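
/* Reclaim completed Tx descriptors: walk the software ring from
 * next_to_clean up to the consumer index last reported by hardware,
 * freeing the mbuf attached to each completed entry.
 */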
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
	int tx_frm_cnt = 0;
	struct enetc_swbd *tx_swbd;
	int i, hwci;

	/* we don't need barriers here, we just want a relatively current value
	 * from HW.
	 */
	hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) &
		     ENETC_TBCISR_IDX_MASK);

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];

	/* we're only reading the CI index once here, which means HW may update
	 * it while we're doing clean-up. We could read the register in a loop,
	 * but for now it's OK to leave a few Tx frames for the next call.
	 * The issue with reading the register in a loop is that we'd stall
	 * here trying to catch up with HW, which keeps sending traffic as
	 * long as it has traffic to send; in effect we could end up waiting
	 * for the Tx ring to be drained by HW instead of doing Rx in the
	 * meantime.
	 */
	while (i != hwci) {
		/* HW is done with this frame; free the mbuf and clear the
		 * software entry.
		 */
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[0];
		}
		tx_frm_cnt++;
	}

	tx_ring->next_to_clean = i;
	return tx_frm_cnt;
}
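
/* Burst Tx: post up to nb_pkts mbufs on the Tx ring, then ring the
 * doorbell once for the whole batch. Returns the number of frames
 * actually queued, which may be less than nb_pkts when the ring is
 * short on free descriptors.
 */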
uint16_t
enetc_xmit_pkts(void *tx_queue,
		struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_swbd *tx_swbd;
	int i, start, bds_to_use;
	struct enetc_tx_bd *txbd;
	struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;

	i = tx_ring->next_to_use;

	bds_to_use = enetc_bd_unused(tx_ring);
	if (bds_to_use < nb_pkts)
		nb_pkts = bds_to_use;

	start = 0;
	while (nb_pkts--) {
		tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
		txbd = ENETC_TXBD(*tx_ring, i);
		tx_swbd = &tx_ring->q_swbd[i];
		txbd->frm_len = tx_pkts[start]->pkt_len;
		txbd->buf_len = txbd->frm_len;
		txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
		txbd->addr = rte_cpu_to_le_64(tx_swbd->buffer_addr->buf_iova +
					      tx_swbd->buffer_addr->data_off);
		i++;
		start++;
		if (unlikely(i == tx_ring->bd_count))
			i = 0;
	}

	/* we're only cleaning up the Tx ring here, on the assumption that
	 * software is slower than hardware and hardware completed sending
	 * older frames out by now.
	 * We're also cleaning up the ring before kicking off Tx for the new
	 * batch to minimize chances of contention on the Tx ring.
	 */
	enetc_clean_tx_ring(tx_ring);

	tx_ring->next_to_use = i;
	enetc_wr_reg(tx_ring->tcir, i);
	return start;
}
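
/* Replenish the Rx ring with up to buff_cnt fresh mbufs and publish the
 * new producer index to hardware. Returns the number of descriptors
 * actually refilled.
 */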
int
enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->q_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);
	for (j = 0; j < buff_cnt; j++) {
		rx_swbd->buffer_addr = rte_pktmbuf_alloc(rx_ring->mb_pool);
		/* stop refilling if the pool runs dry; we'll retry on the
		 * next refill call
		 */
		if (unlikely(!rx_swbd->buffer_addr))
			break;
		rxbd->w.addr = rx_swbd->buffer_addr->buf_iova +
			       rx_swbd->buffer_addr->data_off;
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;
		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
			rx_swbd = &rx_ring->q_swbd[i];
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i;
		rx_ring->next_to_use = i;
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}
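
/* Slow-path Rx parsing: handle parse summaries that carry the hardware
 * parse-error flag, setting packet_type and the checksum ol_flags that
 * reflect the layer the error was reported for.
 */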
static inline void
enetc_slow_parsing(struct rte_mbuf *m, uint64_t parse_results)
{
	m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	switch (parse_results) {
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD;
		return;
	/* More switch cases can be added */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
		m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
			       PKT_RX_L4_CKSUM_UNKNOWN;
	}
}
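
/* Fast-path Rx parsing: translate the hardware parse summary into an
 * mbuf packet_type and checksum ol_flags. Summaries not matched here
 * (including parse errors) fall through to enetc_slow_parsing().
 */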
static inline void __attribute__((hot))
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
	ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;

	switch (parse_results) {
	case ENETC_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		return;
	case ENETC_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		return;
	case ENETC_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		return;
	case ENETC_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_TCP;
		return;
	case ENETC_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_UDP;
		return;
	case ENETC_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_SCTP;
		return;
	case ENETC_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_ICMP;
		return;
	case ENETC_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				 RTE_PTYPE_L4_ICMP;
		return;
	/* More switch cases can be added */
	default:
		enetc_slow_parsing(m, parse_results);
	}
}
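
/* Rx ring processing: walk ready descriptors starting at next_to_clean,
 * fill in mbuf metadata for each received frame and hand the mbufs to
 * the caller. Consumed descriptors are refilled in one batch at the end.
 */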
static int
enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
		    struct rte_mbuf **rx_pkts,
		    int work_limit)
{
	int rx_frm_cnt = 0;
	int cleaned_cnt, i;
	struct enetc_swbd *rx_swbd;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;
	rx_swbd = &rx_ring->q_swbd[i];
	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		uint32_t bd_status;

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
		if (!bd_status)
			break;

		rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
						rx_ring->crc_len;
		rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
						 rx_ring->crc_len;
		rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
		rx_swbd->buffer_addr->ol_flags = 0;
		enetc_dev_rx_parse(rx_swbd->buffer_addr,
				   rxbd->r.parse_summary);
		rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
		cleaned_cnt++;
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = &rx_ring->q_swbd[i];
		}

		rx_ring->next_to_clean = i;
		rx_frm_cnt++;
	}

	enetc_refill_rx_ring(rx_ring, cleaned_cnt);

	return rx_frm_cnt;
}
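
/* Burst Rx entry point registered with the ethdev layer; a thin wrapper
 * around enetc_clean_rx_ring().
 */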
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}