/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"
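/*
 * Reserve (or look up) a DMA-able memzone for a queue ring. The zone
 * name encodes driver, ring, port and queue, so a ring that was
 * already reserved (e.g. after a queue reconfigure) is reused instead
 * of leaked. The zone is page-aligned and IOVA-contiguous because the
 * chip walks the ring by bus address.
 */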
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->device->driver->name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   BNX2X_PAGE_SIZE);
}
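/*
 * Free an RX queue: return all mbufs still posted in the software
 * ring to their pool, then free the ring and the queue structure.
 */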
static void
bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < rx_queue->nb_rx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}
void
bnx2x_dev_rx_queue_release(void *rxq)
{
	bnx2x_rx_queue_release(rxq);
}
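/*
 * Set up an RX queue: allocate the hardware BD ring and the receive
 * completion queue (RCQ) in IOVA-contiguous memory, link their pages
 * into circular chains, and pre-post one mbuf per descriptor.
 */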
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 __rte_unused const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	uint64_t *rx_bd;
	uint64_t busaddr;
	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;

	/* Grow the chain in whole pages until it covers the request. */
	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc  = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
		     "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		     queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
		     (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
		     rxq->nb_cq_pages);
	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
	rxq->rx_ring = (uint64_t *)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);
	/* Link the RX chain pages: the next-to-last BD of each page
	 * carries the bus address of the following page, wrapping back
	 * to the first page.
	 */
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}
	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	/* Initialize software ring entries */
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = rte_mbuf_raw_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] = mbuf->buf_iova;
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = rxq->nb_rx_desc;
	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ alloc failed");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
	rxq->cq_ring = (union eth_rx_cqe *)dma->addr;
	/* Link the CQ chain pages: the last CQE of each page is a
	 * next-page pointer, wrapping back to the first page.
	 */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_head = 0;
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues)
		sc->rx_queues = dev->data->rx_queues;

	return 0;
}
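/*
 * Free a TX queue: free any mbufs still held in the software ring,
 * then the ring and the queue structure.
 */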
static void
bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < tx_queue->nb_tx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}
void
bnx2x_dev_tx_queue_release(void *txq)
{
	bnx2x_tx_queue_release(txq);
}
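/*
 * TX burst handler. Reclaims completed descriptors once the number in
 * flight crosses tx_free_thresh, encapsulates as many packets as fit
 * (each may consume up to BDS_PER_TX_PKT descriptors), then rings the
 * doorbell once for the whole burst.
 */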
static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_tx_queue *txq;
	struct bnx2x_softc *sc;
	struct bnx2x_fastpath *fp;
	uint16_t nb_tx_pkts;
	uint16_t nb_pkt_sent = 0;
	uint32_t ret;

	txq = p_txq;
	sc = txq->sc;
	fp = &sc->fp[txq->queue_id];

	if (unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
		     txq->tx_free_thresh))
		bnx2x_txeof(sc, fp);

	nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
	if (unlikely(nb_tx_pkts == 0))
		return 0;

	while (nb_tx_pkts--) {
		struct rte_mbuf *m = *tx_pkts++;
		assert(m != NULL);
		ret = bnx2x_tx_encap(txq, m);
		fp->tx_db.data.prod += ret;
		nb_pkt_sent++;
	}

	bnx2x_update_fp_sb_idx(fp);
	/* Make the BD writes visible before ringing the doorbell. */
	mb();
	DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
	mb();

	if ((txq->nb_tx_desc - txq->nb_tx_avail) >
	    txq->tx_free_thresh)
		bnx2x_txeof(sc, fp);

	return nb_pkt_sent;
}
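/*
 * Set up a TX queue: size the BD chain in whole pages, allocate the
 * hardware ring and the software mbuf ring, and link the chain pages
 * so the last BD of each page points at the next page.
 */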
int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;
	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc  = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
			      tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
	txq->tx_free_thresh = min(txq->tx_free_thresh,
				  txq->nb_tx_desc - BDS_PER_TX_PKT);

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, tx_pages=%u",
		     queue_idx, nb_desc, txq->tx_free_thresh,
		     (unsigned long)USABLE_TX_BD(txq),
		     (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);
	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
	txq->tx_ring = (union eth_tx_bd_types *)tz->addr;
	memset(txq->tx_ring, 0, tsize);
	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
	/* Link the TX chain pages: the last BD of each page points at
	 * the start of the next page, wrapping back to the first page.
	 */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, "link tx page %lu",
		   (TOTAL_TX_BD_PER_PAGE * i - 1)); */
	}
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues)
		sc->tx_queues = dev->data->tx_queues;

	return 0;
}
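/*
 * Publish the new RX BD and CQ producer indices to the chip with a
 * single 32-bit write into the USTORM producer slot for this queue.
 */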
static inline void
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		       uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	union ustorm_eth_rx_producers rx_prods;

	rx_prods.prod.bd_prod = rx_bd_prod;
	rx_prods.prod.cqe_prod = rx_cq_prod;

	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
}
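/*
 * RX burst handler. Walks the completion queue from the software
 * consumer to the hardware producer: for each fast-path CQE it hands
 * the filled mbuf to the caller, posts a fresh mbuf in the same BD
 * slot, and finally publishes the new producers to the chip.
 */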
static uint16_t
bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_rx_queue *rxq = p_rxq;
	struct bnx2x_softc *sc = rxq->sc;
	struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
	uint16_t nb_rx = 0;
	uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
	uint16_t bd_cons, bd_prod;
	struct rte_mbuf *new_mb;
	uint16_t rx_pref;
	struct eth_fast_path_rx_cqe *cqe_fp;
	uint16_t len, pad;
	struct rte_mbuf *rx_mb = NULL;

	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
	/* Skip over the next-page entry at the end of an RCQ page. */
	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
	    USABLE_RCQ_ENTRIES_PER_PAGE) {
		++hw_cq_cons;
	}

	bd_cons = rxq->rx_bd_head;
	bd_prod = rxq->rx_bd_tail;
	sw_cq_cons = rxq->rx_cq_head;
	sw_cq_prod = rxq->rx_cq_tail;

	if (sw_cq_cons == hw_cq_cons)
		return 0;
	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {

		bd_prod &= MAX_RX_BD(rxq);
		bd_cons &= MAX_RX_BD(rxq);

		cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;

		if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags &
					   ETH_FAST_PATH_RX_CQE_TYPE))) {
			PMD_RX_LOG(ERR, "slowpath event during traffic processing");
			break;
		}

		if (unlikely(cqe_fp->type_error_flags &
			     ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
			PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
				   cqe_fp->type_error_flags, sw_cq_cons);
			goto next_rx;
		}
		len = cqe_fp->pkt_len_or_gro_seg_len;
		pad = cqe_fp->placement_offset;

		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!new_mb)) {
			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
			rte_eth_devices[rxq->port_id].data->
				rx_mbuf_alloc_failed++;
			goto next_rx;
		}

		/* Hand off the filled mbuf and repost a fresh one in
		 * the same BD slot.
		 */
		rx_mb = rxq->sw_ring[bd_cons];
		rxq->sw_ring[bd_cons] = new_mb;
		rxq->rx_ring[bd_prod] = new_mb->buf_iova;
		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
		rte_prefetch0(rxq->sw_ring[rx_pref]);
		if ((rx_pref & 0x3) == 0) {
			rte_prefetch0(&rxq->rx_ring[rx_pref]);
			rte_prefetch0(&rxq->sw_ring[rx_pref]);
		}

		rx_mb->data_off = pad;
		rx_mb->nb_segs = 1;
		rx_mb->next = NULL;
		rx_mb->pkt_len = rx_mb->data_len = len;
		rx_mb->port = rxq->port_id;
		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
			rx_mb->vlan_tci = cqe_fp->vlan_tag;
			rx_mb->ol_flags |= PKT_RX_VLAN;
		}

		rx_pkts[nb_rx] = rx_mb;
		nb_rx++;

		/* limit spinning on the queue */
		if (unlikely(nb_rx == sc->rx_budget)) {
			PMD_RX_LOG(ERR, "Limit spinning on the queue");
			break;
		}
next_rx:
		bd_cons = NEXT_RX_BD(bd_cons);
		bd_prod = NEXT_RX_BD(bd_prod);
		sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
		sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
	}
	rxq->rx_bd_head = bd_cons;
	rxq->rx_bd_tail = bd_prod;
	rxq->rx_cq_head = sw_cq_cons;
	rxq->rx_cq_tail = sw_cq_prod;
	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);

	return nb_rx;
}
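/* Install the RX burst handler once the queues are configured. */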
int
bnx2x_dev_rx_init(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = bnx2x_recv_pkts;

	return 0;
}
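/*
 * Release every configured RX and TX queue so the rings can be
 * reallocated on the next configure.
 */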
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint8_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			bnx2x_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			bnx2x_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}
}