/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */
#include "bnx2x.h"
#include "bnx2x_rxtx.h"
static inline struct rte_mbuf *
bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check(m, 0);

	return m;
}
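/*
 * Reserve a DMA-able memzone for a hardware ring, or return an existing one.
 * The zone name encodes driver, ring, port and queue, so a repeated setup of
 * the same queue finds the zone via rte_memzone_lookup() instead of reserving
 * a duplicate.  The reservation is aligned to BNX2X_PAGE_SIZE, as required by
 * the page-chained ring layout.
 */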
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
					   0, BNX2X_PAGE_SIZE);
}
static void
bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < rx_queue->nb_rx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}
void
bnx2x_dev_rx_queue_release(void *rxq)
{
	bnx2x_rx_queue_release(rxq);
}
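/*
 * RX queue setup: the BD ring is sized in whole pages, doubling nb_rx_pages
 * until the usable BD count covers the requested nb_desc, so the ring may end
 * up larger than asked for.  The tail of each page links to the following
 * page (see the chain-linking loops below), which is why USABLE_RX_BD is
 * smaller than TOTAL_RX_BD.  Every usable BD is pre-populated with an mbuf
 * from the given mempool.
 */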
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	uint64_t *rx_bd;
	uint64_t busaddr;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc  = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);

	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		     queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq),
		     TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages);
	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->rx_ring = (uint64_t *)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);

	/* Link the RX chain pages. */
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}
	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Initialize software ring entries */
	rxq->rx_mbuf_alloc = 0;
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = bnx2x_rxmbuf_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] = mbuf->buf_physaddr;
		rxq->rx_mbuf_alloc++;
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = rxq->nb_rx_desc;

	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ alloc failed");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->cq_ring = (union eth_rx_cqe *)dma->addr;

	/* Link the CQ chain pages. */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues)
		sc->rx_queues = dev->data->rx_queues;

	return 0;
}
static void
bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < tx_queue->nb_tx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void
bnx2x_dev_tx_queue_release(void *txq)
{
	bnx2x_tx_queue_release(txq);
}
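/*
 * TX burst function: packets are handed to bnx2x_tx_encap() in chunks of at
 * most RTE_PMD_BNX2X_TX_MAX_BURST, the fastpath status block index is
 * refreshed after each chunk, and TX completions are processed once the
 * number of BDs in flight exceeds tx_free_thresh.  The burst ends early if
 * the encap step runs out of free BDs (-ENOMEM).  Returns the number of
 * packets actually queued for transmission.
 */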
static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_tx_queue *txq;
	struct bnx2x_softc *sc;
	struct bnx2x_fastpath *fp;
	uint32_t burst, nb_tx;
	struct rte_mbuf **m = tx_pkts;
	int ret;

	txq = p_txq;
	sc = txq->sc;
	fp = &sc->fp[txq->queue_id];

	nb_tx = nb_pkts;

	do {
		burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);

		ret = bnx2x_tx_encap(txq, m, burst);
		if (unlikely(ret)) {
			PMD_TX_LOG(ERR, "tx_encap failed!");
		}

		bnx2x_update_fp_sb_idx(fp);

		if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
			/* Reclaim descriptors completed by the hardware. */
			bnx2x_txeof(sc, fp);
		}

		/* Stop early if the encap step ran out of free BDs. */
		if (unlikely(ret == -ENOMEM)) {
			break;
		}

		m += burst;
		nb_pkts -= burst;
	} while (nb_pkts);

	/* nb_pkts now holds the packets that were not sent. */
	return nb_tx - nb_pkts;
}
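/*
 * TX queue setup: like the RX side, the ring is sized in whole BD pages and
 * the last BD of every page is a next_bd entry chaining it to the following
 * page, so the pages form one circular descriptor chain.
 */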
int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;
	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc  = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, tx_pages=%u",
		     queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq),
		     TOTAL_TX_BD(txq), txq->nb_tx_pages);
	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
	txq->tx_ring = (union eth_tx_bd_types *)tz->addr;
	memset(txq->tx_ring, 0, tsize);

	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
	/* Link the TX chain pages. */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues)
		sc->tx_queues = dev->data->tx_queues;

	return 0;
}
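/*
 * Publish the new RX BD and CQE producer indices to the chip: both values are
 * packed into a ustorm_eth_rx_producers union and posted with a single 32-bit
 * register write to the fastpath's ustorm producer offset.
 */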
static inline void
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		       uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	union ustorm_eth_rx_producers rx_prods;

	rx_prods.prod.bd_prod = rx_bd_prod;
	rx_prods.prod.cqe_prod = rx_cq_prod;

	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
}
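/*
 * RX burst function: walk the completion queue from the software consumer up
 * to the hardware consumer taken from the status block.  For each fast-path
 * CQE the filled mbuf is handed to the caller and immediately replaced in the
 * BD ring by a freshly allocated buffer, so the ring stays fully populated.
 * The updated producers are written back to the chip once per burst.
 */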
static uint16_t
bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_rx_queue *rxq = p_rxq;
	struct bnx2x_softc *sc = rxq->sc;
	struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
	uint16_t nb_rx = 0;
	uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
	uint16_t bd_cons, bd_prod;
	struct rte_mbuf *new_mb;
	uint16_t rx_pref;
	struct eth_fast_path_rx_cqe *cqe_fp;
	uint16_t len, pad;
	struct rte_mbuf *rx_mb = NULL;

	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
	    USABLE_RCQ_ENTRIES_PER_PAGE) {
		/* Account for the "next page" element at the end of a CQ page. */
		++hw_cq_cons;
	}

	bd_cons = rxq->rx_bd_head;
	bd_prod = rxq->rx_bd_tail;
	sw_cq_cons = rxq->rx_cq_head;
	sw_cq_prod = rxq->rx_cq_tail;

	if (sw_cq_cons == hw_cq_cons)
		return 0;
	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {

		bd_prod &= MAX_RX_BD(rxq);
		bd_cons &= MAX_RX_BD(rxq);

		cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;

		if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) {
			PMD_RX_LOG(ERR, "slowpath event during traffic processing");
			break;
		}

		if (unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
			PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
				   cqe_fp->type_error_flags, sw_cq_cons);
			goto next_rx;
		}

		len = cqe_fp->pkt_len_or_gro_seg_len;
		pad = cqe_fp->placement_offset;

		new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
		if (unlikely(!new_mb)) {
			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
			goto next_rx;
		}

		rx_mb = rxq->sw_ring[bd_cons];
		rxq->sw_ring[bd_cons] = new_mb;
		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;

		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
		rte_prefetch0(rxq->sw_ring[rx_pref]);
		if ((rx_pref & 0x3) == 0) {
			rte_prefetch0(&rxq->rx_ring[rx_pref]);
			rte_prefetch0(&rxq->sw_ring[rx_pref]);
		}
		rx_mb->data_off = pad;
		rx_mb->nb_segs = 1;
		rx_mb->next = NULL;
		rx_mb->pkt_len = rx_mb->data_len = len;
		rx_mb->port = rxq->port_id;
		rx_mb->buf_len = len + pad;
		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
			rx_mb->vlan_tci = cqe_fp->vlan_tag;
			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
		}

		rx_pkts[nb_rx] = rx_mb;
		nb_rx++;

		/* limit spinning on the queue */
		if (unlikely(nb_rx == sc->rx_budget)) {
			PMD_RX_LOG(ERR, "Limit spinning on the queue");
			break;
		}

next_rx:
		bd_cons = NEXT_RX_BD(bd_cons);
		bd_prod = NEXT_RX_BD(bd_prod);
		sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
		sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
	}
	rxq->rx_bd_head = bd_cons;
	rxq->rx_bd_tail = bd_prod;
	rxq->rx_cq_head = sw_cq_cons;
	rxq->rx_cq_tail = sw_cq_prod;

	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);

	return nb_rx;
}
int
bnx2x_dev_rx_init(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = bnx2x_recv_pkts;

	return 0;
}
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			bnx2x_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			bnx2x_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}
}