/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"
void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		/* TODO: free() txq->tx_ring and txq->tx_ring->tx_ring_struct */
		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		/* TODO: free() txq->cp_ring and txq->cp_ring->cp_ring_struct */
		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	/* TODO: These need to be allocated */
	txr = txq->tx_ring;
	ring = txr->tx_ring_struct;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	/* TODO: These need to be allocated */
	cpr = txq->cp_ring;
	ring = cpr->cp_ring_struct;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	/* Completion ring has no SW buffer ring behind it. */
	ring->vmem_size = 0;
	ring->vmem = NULL;
}
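/*
 * Illustrative sizing example (values assumed, not from the original
 * source): with nb_tx_desc == 256, rte_align32pow2(257) == 512, so
 * ring_size == 512 and ring_mask == 0x1ff. Keeping the size a power of
 * two lets producer/consumer indices wrap with a cheap AND instead of
 * a modulo.
 */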
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	rte_compiler_barrier();

	return txr->tx_ring_struct->ring_size -
		((txr->tx_prod - txr->tx_cons) &
		 txr->tx_ring_struct->ring_mask) - 1;
}
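/*
 * Worked example (values assumed): on a 512-entry ring (ring_mask ==
 * 511), tx_prod == 10 and tx_cons == 500 give (10 - 500) & 511 == 22
 * BDs in flight, so 512 - 22 - 1 == 489 descriptors remain. One slot
 * is always kept empty so that tx_prod == tx_cons unambiguously means
 * "ring empty".
 */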
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	uint16_t last_prod = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};
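	/*
	 * For frames below the 2014-byte cutoff tested later, data_len >> 9
	 * selects a bucket above: 0-511 -> LT512, 512-1023 -> LT1K and
	 * 1024-2013 -> LT2K; larger frames are tagged GTE2K instead.
	 */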
	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT))
		long_bd = true;

	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
				txr->tx_ring_struct->ring_mask;

	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
		return -ENOMEM;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = txr->tx_prod;
	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->len = tx_pkt->data_len;
	if (txbd->len >= 2014)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[txbd->len >> 9];
	txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 8021Q, 8021AD VLAN offloads;
			 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;

		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			/* TSO */
			txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len;
			txbd1->mss = tx_pkt->tso_segsz;
		} else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
					PKT_TX_UDP_CKSUM)) {
			/* TCP/UDP checksum offload */
			txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
			/* IP checksum offload */
			txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	m_seg = tx_pkt->next;
	while (txr->tx_prod != last_prod) {
		/* Chain a short BD for each remaining segment. */
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;
		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}
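/*
 * BD accounting example (assumed workload, not from the original
 * source): a 3-segment mbuf chain with checksum offload requested uses
 * nr_bds == 1 + 3 == 4 ring entries: one long BD, one "hi" BD carrying
 * the offload metadata, and one short BD for each remaining segment.
 */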
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons = txr->tx_cons;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = tx_buf->mbuf;
		tx_buf->mbuf = NULL;
		/* EW - no need to unmap DMA memory? */

		/* Skip past the remaining BDs of this packet. */
		for (j = 1; j < tx_buf->nr_bds; j++)
			cons = RING_NEXT(txr->tx_ring_struct, cons);
		rte_pktmbuf_free(mbuf);
	}

	txr->tx_cons = cons;
}
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;

	if ((txq->tx_ring->tx_ring_struct->ring_size -
			(bnxt_tx_avail(txq->tx_ring))) >
			txq->tx_free_thresh) {
		while (1) {
			cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
			txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
			if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
				break;

			if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
				nb_tx_pkts++;
			else
				RTE_LOG(DEBUG, PMD,
						"Unhandled CMP type %02x\n",
						CMP_TYPE(txcmp));
			raw_cons = NEXT_RAW_CMP(raw_cons);
		}
		if (nb_tx_pkts)
			bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
	}
	return nb_tx_pkts;
}
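/*
 * Note on CMP_VALID(): the completion ring is not zeroed between
 * passes; each entry carries a valid bit whose expected phase flips
 * every time raw_cons wraps, so stale entries from the previous pass
 * compare as not-valid and terminate the while loop above.
 */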
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	uint16_t nb_tx_pkts = 0;
	uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2;
	uint16_t last_db_mask = 0;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
			/* Ring full: stop and report what was queued. */
			break;
		} else if ((nb_tx_pkts & db_mask) != last_db_mask) {
			/* Ring the doorbell periodically mid-burst. */
			B_TX_DB(txq->tx_ring->tx_doorbell,
					txq->tx_ring->tx_prod);
			last_db_mask = nb_tx_pkts & db_mask;
		}
	}

	B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);

	return nb_tx_pkts;
}
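/*
 * Usage sketch (hypothetical application code; port_id and queue_id
 * are placeholders): this function is not called directly, but is
 * installed as the device's tx_pkt_burst handler and reached through
 * rte_eth_tx_burst():
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, 32);
 *	// sent < 32 means the ring filled up; retry or free the rest
 */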