/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"
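
/*
 * Reserve a page-aligned memzone for a descriptor ring, or return the
 * already existing zone if one with the same name (driver, ring, port and
 * queue id) has been reserved before.
 */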
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
}
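
/*
 * Free every mbuf still attached to an RX queue, then release the software
 * ring and the queue structure itself.
 */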
static void
bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < rx_queue->nb_rx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void
bnx2x_dev_rx_queue_release(void *rxq)
{
	bnx2x_rx_queue_release(rxq);
}
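
/*
 * Set up one RX queue: allocate the queue structure, the hardware BD ring,
 * the completion queue (RCQ) ring and the software mbuf ring, then pre-fill
 * every usable BD with an mbuf taken from the given mempool.
 */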
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	uint64_t *rx_bd;
	uint64_t busaddr;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
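
	/*
	 * Size the BD ring in whole pages: double the page count until the
	 * usable BDs cover the requested number of descriptors.
	 */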
	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc  = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);

	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		     queue_idx, nb_desc, rxq->rx_free_thresh,
		     (unsigned long)USABLE_RX_BD(rxq),
		     (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
		     rxq->nb_cq_pages);

	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->rx_ring = (uint64_t *)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);

	/*
	 * Link the RX chain pages: the last BD slots of each page hold the
	 * bus address of the next page, with the final page wrapping back
	 * to the first one.
	 */
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}

	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Initialize software ring entries */
	rxq->rx_mbuf_alloc = 0;
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = rte_mbuf_raw_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] = mbuf->buf_physaddr;
		rxq->rx_mbuf_alloc++;
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = rxq->nb_rx_desc;

	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ alloc failed");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->cq_ring = (union eth_rx_cqe *)dma->addr;

	/* Link the CQ chain pages. */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_head = 0;
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues)
		sc->rx_queues = dev->data->rx_queues;

	return 0;
}
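
/*
 * Free every mbuf still attached to a TX queue, then release the software
 * ring and the queue structure itself.
 */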
static void
bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < tx_queue->nb_tx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void
bnx2x_dev_tx_queue_release(void *txq)
{
	bnx2x_tx_queue_release(txq);
}
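
/*
 * Burst transmit: reclaim completed descriptors once the free threshold is
 * crossed, then encapsulate and post the packets, updating the fastpath
 * status block index as we go.
 */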
static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_tx_queue *txq;
	struct bnx2x_softc *sc;
	struct bnx2x_fastpath *fp;
	uint32_t burst;
	struct rte_mbuf **m = tx_pkts;
	uint16_t nb_tx_pkts;
	uint16_t nb_pkt_sent = 0;

	txq = p_txq;
	sc = txq->sc;
	fp = &sc->fp[txq->queue_id];

	if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
		      txq->tx_free_thresh)))
		bnx2x_txeof(sc, fp);

	nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
	if (unlikely(nb_tx_pkts == 0))
		return 0;

	burst = RTE_MIN(nb_tx_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);

	while (nb_tx_pkts--) {
		bnx2x_tx_encap(txq, m, burst);
		bnx2x_update_fp_sb_idx(fp);
		if ((txq->nb_tx_desc - txq->nb_tx_avail) >
		    txq->tx_free_thresh)
			bnx2x_txeof(sc, fp);
		m += burst;
		nb_pkt_sent += burst;
	}

	return nb_pkt_sent;
}

int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;

	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc  = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, tx_pages=%u",
		     queue_idx, nb_desc, txq->tx_free_thresh,
		     (unsigned long)USABLE_TX_BD(txq),
		     (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
	txq->tx_ring = (union eth_tx_bd_types *)tz->addr;
	memset(txq->tx_ring, 0, tsize);

	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */

	/* Link the TX chain pages: the last BD of each page points to the next page. */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues)
		sc->tx_queues = dev->data->tx_queues;

	return 0;
}
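
/*
 * Publish the new RX BD and CQE producer indices to the adapter by writing
 * the packed producer word to the fastpath ustorm producers register.
 */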
static inline void
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		       uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	union ustorm_eth_rx_producers rx_prods;

	rx_prods.prod.bd_prod = rx_bd_prod;
	rx_prods.prod.cqe_prod = rx_cq_prod;

	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
}
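
/*
 * Burst receive: walk the completion queue, replace each filled buffer with
 * a freshly allocated mbuf, hand the received mbuf to the caller and finally
 * tell the hardware how far the producer indices have advanced.
 */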
static uint16_t
bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_rx_queue *rxq = p_rxq;
	struct bnx2x_softc *sc = rxq->sc;
	struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
	uint32_t nb_rx = 0;
	uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
	uint16_t bd_cons, bd_prod;
	struct rte_mbuf *new_mb;
	uint16_t rx_pref;
	struct eth_fast_path_rx_cqe *cqe_fp;
	uint16_t len, pad;
	struct rte_mbuf *rx_mb = NULL;

	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
	    USABLE_RCQ_ENTRIES_PER_PAGE) {
		++hw_cq_cons;
	}

	bd_cons = rxq->rx_bd_head;
	bd_prod = rxq->rx_bd_tail;
	sw_cq_cons = rxq->rx_cq_head;
	sw_cq_prod = rxq->rx_cq_tail;

	if (sw_cq_cons == hw_cq_cons)
		return 0;

	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {

		bd_prod &= MAX_RX_BD(rxq);
		bd_cons &= MAX_RX_BD(rxq);

		cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;

		if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) {
			PMD_RX_LOG(ERR, "slowpath event during traffic processing");
			break;
		}

		if (unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
			PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
				   cqe_fp->type_error_flags, sw_cq_cons);
			goto next_rx;
		}

		len = cqe_fp->pkt_len_or_gro_seg_len;
		pad = cqe_fp->placement_offset;

		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!new_mb)) {
			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
			rte_eth_devices[rxq->port_id].data->
				rx_mbuf_alloc_failed++;
			goto next_rx;
		}

		rx_mb = rxq->sw_ring[bd_cons];
		rxq->sw_ring[bd_cons] = new_mb;
		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;

		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
		rte_prefetch0(rxq->sw_ring[rx_pref]);
		if ((rx_pref & 0x3) == 0) {
			rte_prefetch0(&rxq->rx_ring[rx_pref]);
			rte_prefetch0(&rxq->sw_ring[rx_pref]);
		}

		rx_mb->data_off = pad;
		rx_mb->nb_segs = 1;
		rx_mb->next = NULL;
		rx_mb->pkt_len = rx_mb->data_len = len;
		rx_mb->port = rxq->port_id;
		rx_mb->buf_len = len + pad;
		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
			rx_mb->vlan_tci = cqe_fp->vlan_tag;
			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
		}

		rx_pkts[nb_rx] = rx_mb;
		nb_rx++;

		/* limit spinning on the queue */
		if (unlikely(nb_rx == sc->rx_budget)) {
			PMD_RX_LOG(ERR, "Limit spinning on the queue");
			break;
		}

next_rx:
		bd_cons = NEXT_RX_BD(bd_cons);
		bd_prod = NEXT_RX_BD(bd_prod);
		sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
		sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
	}

	rxq->rx_bd_head = bd_cons;
	rxq->rx_bd_tail = bd_prod;
	rxq->rx_cq_head = sw_cq_cons;
	rxq->rx_cq_tail = sw_cq_prod;

	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);

	return nb_rx;
}
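
/* Hook the burst receive function into the ethdev. */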
int
bnx2x_dev_rx_init(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = bnx2x_recv_pkts;

	return 0;
}

void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint8_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			bnx2x_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			bnx2x_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}
}