/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */
9 #include "rte_ethdev.h"
10 #include "rte_malloc.h"
11 #include "rte_memzone.h"
13 #include "base/enetc_hw.h"
15 #include "enetc_logs.h"
17 #define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */
20 enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
23 struct enetc_swbd *tx_swbd;
26 i = tx_ring->next_to_clean;
27 tx_swbd = &tx_ring->q_swbd[i];
28 while ((int)(enetc_rd_reg(tx_ring->tcisr) &
29 ENETC_TBCISR_IDX_MASK) != i) {
30 rte_pktmbuf_free(tx_swbd->buffer_addr);
31 tx_swbd->buffer_addr = NULL;
34 if (unlikely(i == tx_ring->bd_count)) {
36 tx_swbd = &tx_ring->q_swbd[0];
42 tx_ring->next_to_clean = i;
47 enetc_xmit_pkts(void *tx_queue,
48 struct rte_mbuf **tx_pkts,
51 struct enetc_swbd *tx_swbd;
53 struct enetc_tx_bd *txbd;
54 struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;
56 i = tx_ring->next_to_use;
59 enetc_clean_tx_ring(tx_ring);
60 tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
61 txbd = ENETC_TXBD(*tx_ring, i);
62 tx_swbd = &tx_ring->q_swbd[i];
63 txbd->frm_len = tx_pkts[start]->pkt_len;
64 txbd->buf_len = txbd->frm_len;
65 txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
66 txbd->addr = (uint64_t)(uintptr_t)
67 rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova +
68 tx_swbd->buffer_addr->data_off);
71 if (unlikely(i == tx_ring->bd_count))
75 tx_ring->next_to_use = i;
76 enetc_wr_reg(tx_ring->tcir, i);
81 enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
83 struct enetc_swbd *rx_swbd;
84 union enetc_rx_bd *rxbd;
87 i = rx_ring->next_to_use;
88 rx_swbd = &rx_ring->q_swbd[i];
89 rxbd = ENETC_RXBD(*rx_ring, i);
90 for (j = 0; j < buff_cnt; j++) {
91 rx_swbd->buffer_addr = (void *)(uintptr_t)
92 rte_cpu_to_le_64((uint64_t)(uintptr_t)
93 rte_pktmbuf_alloc(rx_ring->mb_pool));
94 rxbd->w.addr = (uint64_t)(uintptr_t)
95 rx_swbd->buffer_addr->buf_iova +
96 rx_swbd->buffer_addr->data_off;
97 /* clear 'R" as well */
102 if (unlikely(i == rx_ring->bd_count)) {
104 rxbd = ENETC_RXBD(*rx_ring, 0);
105 rx_swbd = &rx_ring->q_swbd[i];
110 rx_ring->next_to_alloc = i;
111 rx_ring->next_to_use = i;
112 enetc_wr_reg(rx_ring->rcir, i);
119 static inline void __attribute__((hot))
120 enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
122 ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
124 m->packet_type = RTE_PTYPE_UNKNOWN;
125 switch (parse_results) {
126 case ENETC_PKT_TYPE_ETHER:
127 m->packet_type = RTE_PTYPE_L2_ETHER;
129 case ENETC_PKT_TYPE_IPV4:
130 m->packet_type = RTE_PTYPE_L2_ETHER |
133 case ENETC_PKT_TYPE_IPV6:
134 m->packet_type = RTE_PTYPE_L2_ETHER |
137 case ENETC_PKT_TYPE_IPV4_TCP:
138 m->packet_type = RTE_PTYPE_L2_ETHER |
142 case ENETC_PKT_TYPE_IPV6_TCP:
143 m->packet_type = RTE_PTYPE_L2_ETHER |
147 case ENETC_PKT_TYPE_IPV4_UDP:
148 m->packet_type = RTE_PTYPE_L2_ETHER |
152 case ENETC_PKT_TYPE_IPV6_UDP:
153 m->packet_type = RTE_PTYPE_L2_ETHER |
157 case ENETC_PKT_TYPE_IPV4_SCTP:
158 m->packet_type = RTE_PTYPE_L2_ETHER |
162 case ENETC_PKT_TYPE_IPV6_SCTP:
163 m->packet_type = RTE_PTYPE_L2_ETHER |
167 case ENETC_PKT_TYPE_IPV4_ICMP:
168 m->packet_type = RTE_PTYPE_L2_ETHER |
172 case ENETC_PKT_TYPE_IPV6_ICMP:
173 m->packet_type = RTE_PTYPE_L2_ETHER |
177 /* More switch cases can be added */
179 m->packet_type = RTE_PTYPE_UNKNOWN;
184 enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
185 struct rte_mbuf **rx_pkts,
190 struct enetc_swbd *rx_swbd;
192 cleaned_cnt = enetc_bd_unused(rx_ring);
193 /* next descriptor to process */
194 i = rx_ring->next_to_clean;
195 rx_swbd = &rx_ring->q_swbd[i];
196 while (likely(rx_frm_cnt < work_limit)) {
197 union enetc_rx_bd *rxbd;
200 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
201 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
203 cleaned_cnt -= count;
206 rxbd = ENETC_RXBD(*rx_ring, i);
207 bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
211 rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len -
213 rx_swbd->buffer_addr->data_len = rxbd->r.buf_len -
215 rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
216 rx_swbd->buffer_addr->ol_flags = 0;
217 enetc_dev_rx_parse(rx_swbd->buffer_addr,
218 rxbd->r.parse_summary);
219 rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
223 if (unlikely(i == rx_ring->bd_count)) {
225 rx_swbd = &rx_ring->q_swbd[i];
228 rx_ring->next_to_clean = i;
/* Burst-receive entry point registered with ethdev.
 *
 * Thin wrapper: all ring-walk logic lives in enetc_clean_rx_ring().
 *
 * @param rxq		Rx ring (struct enetc_bdr *).
 * @param rx_pkts	Output array for received mbufs.
 * @param nb_pkts	Capacity of @rx_pkts.
 * @return		Number of packets received.
 */
uint16_t
enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;

	return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
}