1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2019 NXP
9 #include "rte_ethdev.h"
10 #include "rte_malloc.h"
11 #include "rte_memzone.h"
13 #include "base/enetc_hw.h"
15 #include "enetc_logs.h"
17 #define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */
/*
 * Reclaim Tx buffers that hardware has finished transmitting.
 *
 * Walks the software BD ring from next_to_clean, freeing one mbuf per
 * completed BD, until the index catches up with the hardware consumer
 * index read from the TBCISR register (masked with
 * ENETC_TBCISR_IDX_MASK). Wraps back to BD 0 at bd_count and records
 * the final position in next_to_clean.
 */
20 enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
23 struct enetc_swbd *tx_swbd;
/* Resume where the previous clean pass stopped. */
26 i = tx_ring->next_to_clean;
27 tx_swbd = &tx_ring->q_swbd[i];
/* Hardware's consumer index tells us how far transmission has progressed. */
28 while ((int)(enetc_rd_reg(tx_ring->tcisr) &
29 ENETC_TBCISR_IDX_MASK) != i) {
/* Release the mbuf attached to this completed BD. */
30 rte_pktmbuf_free(tx_swbd->buffer_addr);
31 tx_swbd->buffer_addr = NULL;
/* Wrap around at the end of the ring. */
34 if (unlikely(i == tx_ring->bd_count)) {
36 tx_swbd = &tx_ring->q_swbd[0];
42 tx_ring->next_to_clean = i;
/*
 * Tx burst handler: queue up to nb_pkts mbufs for transmission.
 *
 * Reclaims completed descriptors when the ring runs short of free BDs,
 * then fills one Tx BD per packet (single-segment: frame length equals
 * buffer length) and finally kicks hardware by writing the new
 * producer index to the TCIR register.
 */
47 enetc_xmit_pkts(void *tx_queue,
48 struct rte_mbuf **tx_pkts,
51 struct enetc_swbd *tx_swbd;
52 int i, start, bds_to_use;
53 struct enetc_tx_bd *txbd;
54 struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;
56 i = tx_ring->next_to_use;
/* Not enough free BDs for the whole burst: try to reclaim some first. */
58 bds_to_use = enetc_bd_unused(tx_ring);
59 if (bds_to_use < nb_pkts)
64 enetc_clean_tx_ring(tx_ring);
/* Record the mbuf so the clean pass can free it after transmission. */
65 tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
66 txbd = ENETC_TXBD(*tx_ring, i);
67 tx_swbd = &tx_ring->q_swbd[i];
/* Single-buffer frame: BD buffer length equals the whole packet length. */
68 txbd->frm_len = tx_pkts[start]->pkt_len;
69 txbd->buf_len = txbd->frm_len;
70 txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
/* DMA address = mbuf IOVA plus the offset of the packet data. */
71 txbd->addr = (uint64_t)(uintptr_t)
72 rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
73 tx_swbd->buffer_addr->data_off);
/* Wrap the producer index at the end of the ring. */
76 if (unlikely(i == tx_ring->bd_count))
80 tx_ring->next_to_use = i;
/* Notify hardware of the new producer index. */
81 enetc_wr_reg(tx_ring->tcir, i);
/*
 * Replenish up to buff_cnt Rx BDs with freshly allocated mbufs.
 *
 * For each BD: allocate an mbuf from the queue's mempool, store it in
 * the software ring, and program the BD's DMA address with the mbuf
 * IOVA plus data offset. Wraps at bd_count, then records the new
 * position in next_to_alloc/next_to_use and publishes the producer
 * index via the RCIR register.
 *
 * NOTE(review): rte_pktmbuf_alloc() can return NULL on mempool
 * exhaustion, yet the result is dereferenced unchecked below —
 * confirm the pool is provisioned so this cannot happen, or add a
 * NULL check and bail out early.
 */
86 enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
88 struct enetc_swbd *rx_swbd;
89 union enetc_rx_bd *rxbd;
92 i = rx_ring->next_to_use;
93 rx_swbd = &rx_ring->q_swbd[i];
94 rxbd = ENETC_RXBD(*rx_ring, i);
95 for (j = 0; j < buff_cnt; j++) {
/* New buffer for this slot; hardware will DMA the frame into it. */
96 rx_swbd->buffer_addr = (void *)(uintptr_t)
97 rte_cpu_to_le_64((uint64_t)(uintptr_t)
98 rte_pktmbuf_alloc(rx_ring->mb_pool));
99 rxbd->w.addr = (uint64_t)(uintptr_t)
100 rx_swbd->buffer_addr->buf_iova +
101 rx_swbd->buffer_addr->data_off;
102 /* clear the 'R' (ready) bit as well */
/* Wrap to the first BD at the end of the ring. */
107 if (unlikely(i == rx_ring->bd_count)) {
109 rxbd = ENETC_RXBD(*rx_ring, 0);
110 rx_swbd = &rx_ring->q_swbd[i];
115 rx_ring->next_to_alloc = i;
116 rx_ring->next_to_use = i;
/* Publish the new producer index to hardware. */
117 enetc_wr_reg(rx_ring->rcir, i);
/*
 * Slow-path Rx classification for frames whose hardware parse result
 * carries ENETC_PARSE_ERROR.
 *
 * Maps each (ENETC_PARSE_ERROR | packet type) combination to an mbuf
 * packet_type and checksum ol_flags. Plain IPv4/IPv6 error cases mark
 * the IP checksum bad; the L4 cases (TCP/UDP/SCTP/ICMP) keep
 * PKT_RX_IP_CKSUM_GOOD — presumably the continuation (not visible in
 * this view) marks the L4 checksum bad; confirm against the full
 * source. Unrecognised results fall through to RTE_PTYPE_UNKNOWN with
 * checksum state unknown.
 */
123 static inline void enetc_slow_parsing(struct rte_mbuf *m,
124 uint64_t parse_results)
/* Drop the optimistic GOOD bits set by the fast path before reclassifying. */
126 m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
128 switch (parse_results) {
129 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
130 m->packet_type = RTE_PTYPE_L2_ETHER |
132 m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
134 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
135 m->packet_type = RTE_PTYPE_L2_ETHER |
137 m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
139 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
140 m->packet_type = RTE_PTYPE_L2_ETHER |
143 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
146 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
147 m->packet_type = RTE_PTYPE_L2_ETHER |
150 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
153 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
154 m->packet_type = RTE_PTYPE_L2_ETHER |
157 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
160 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
161 m->packet_type = RTE_PTYPE_L2_ETHER |
164 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
167 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
168 m->packet_type = RTE_PTYPE_L2_ETHER |
171 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
174 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
175 m->packet_type = RTE_PTYPE_L2_ETHER |
178 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
181 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
182 m->packet_type = RTE_PTYPE_L2_ETHER |
185 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
188 case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
189 m->packet_type = RTE_PTYPE_L2_ETHER |
192 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
195 /* More switch cases can be added */
/* Unknown parse result: report nothing definite about checksums. */
197 m->packet_type = RTE_PTYPE_UNKNOWN;
198 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
199 PKT_RX_L4_CKSUM_UNKNOWN;
/*
 * Fast-path Rx classification from the hardware parse summary.
 *
 * Optimistically marks both IP and L4 checksums good, then maps the
 * parse summary to an mbuf packet_type. Any summary not matched here
 * (including the ENETC_PARSE_ERROR combinations) is delegated to
 * enetc_slow_parsing(), which rewrites the checksum flags as needed.
 */
204 static inline void __attribute__((hot))
205 enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
207 ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
/* Assume good checksums; the slow path clears these on error. */
208 m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;
210 switch (parse_results) {
211 case ENETC_PKT_TYPE_ETHER:
212 m->packet_type = RTE_PTYPE_L2_ETHER;
214 case ENETC_PKT_TYPE_IPV4:
215 m->packet_type = RTE_PTYPE_L2_ETHER |
218 case ENETC_PKT_TYPE_IPV6:
219 m->packet_type = RTE_PTYPE_L2_ETHER |
222 case ENETC_PKT_TYPE_IPV4_TCP:
223 m->packet_type = RTE_PTYPE_L2_ETHER |
227 case ENETC_PKT_TYPE_IPV6_TCP:
228 m->packet_type = RTE_PTYPE_L2_ETHER |
232 case ENETC_PKT_TYPE_IPV4_UDP:
233 m->packet_type = RTE_PTYPE_L2_ETHER |
237 case ENETC_PKT_TYPE_IPV6_UDP:
238 m->packet_type = RTE_PTYPE_L2_ETHER |
242 case ENETC_PKT_TYPE_IPV4_SCTP:
243 m->packet_type = RTE_PTYPE_L2_ETHER |
247 case ENETC_PKT_TYPE_IPV6_SCTP:
248 m->packet_type = RTE_PTYPE_L2_ETHER |
252 case ENETC_PKT_TYPE_IPV4_ICMP:
253 m->packet_type = RTE_PTYPE_L2_ETHER |
257 case ENETC_PKT_TYPE_IPV6_ICMP:
258 m->packet_type = RTE_PTYPE_L2_ETHER |
262 /* More switch cases can be added */
/* Fall back to the error/uncommon-type handler. */
264 enetc_slow_parsing(m, parse_results);
/*
 * Core Rx loop: harvest up to work_limit received frames.
 *
 * Starting from next_to_clean, refills the ring in bundles once at
 * least ENETC_RXBD_BUNDLE BDs have been consumed, then for each ready
 * BD populates the attached mbuf (lengths, RSS hash, offload flags
 * via enetc_dev_rx_parse) and hands it to the caller through
 * rx_pkts[]. Wraps at bd_count and records the final position in
 * next_to_clean.
 */
270 enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
271 struct rte_mbuf **rx_pkts,
276 struct enetc_swbd *rx_swbd;
/* Number of BDs currently free and eligible for refill. */
278 cleaned_cnt = enetc_bd_unused(rx_ring);
279 /* next descriptor to process */
280 i = rx_ring->next_to_clean;
281 rx_swbd = &rx_ring->q_swbd[i];
282 while (likely(rx_frm_cnt < work_limit)) {
283 union enetc_rx_bd *rxbd;
/* Batch the refill: only top up once a bundle's worth has drained. */
286 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
287 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
289 cleaned_cnt -= count;
292 rxbd = ENETC_RXBD(*rx_ring, i);
/* Descriptor status/length word written back by hardware. */
293 bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
/* Fill in the received frame's metadata on the attached mbuf. */
297 rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
299 rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
301 rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
302 rx_swbd->buffer_addr->ol_flags = 0;
303 enetc_dev_rx_parse(rx_swbd->buffer_addr,
304 rxbd->r.parse_summary);
/* Hand the completed mbuf to the caller. */
305 rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
/* Wrap around at the end of the ring. */
309 if (unlikely(i == rx_ring->bd_count)) {
311 rx_swbd = &rx_ring->q_swbd[i];
314 rx_ring->next_to_clean = i;
/*
 * Rx burst entry point registered with ethdev; thin wrapper that
 * forwards to enetc_clean_rx_ring() for the actual work.
 */
322 enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
325 struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
327 return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);