/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * All rights reserved.
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

static inline struct rte_mbuf *
bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check(m, 0);

	return m;
}
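
/*
 * Reserve (or look up) a DMA-able memzone for a per-queue ring. The zone
 * name is built from the driver name, ring name, port id and queue id so
 * a repeated setup call for the same queue reuses the existing zone.
 */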

static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
					   0, BNX2X_PAGE_SIZE);
}

static void
bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (NULL != sw_ring) {
			/* Free any mbufs still held by the software ring. */
			for (i = 0; i < rx_queue->nb_rx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void
bnx2x_dev_rx_queue_release(void *rxq)
{
	bnx2x_rx_queue_release(rxq);
}
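
/*
 * Set up one RX queue: allocate the queue structure, the hardware BD ring,
 * the completion queue (RCQ) chain and the software mbuf ring, then
 * pre-fill every usable BD with a fresh mbuf from the given mempool.
 */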

int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	uint64_t *rx_bd;
	uint64_t busaddr;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ?
				 0 : ETHER_CRC_LEN);

	/* Grow the number of BD pages until the ring can hold nb_desc. */
	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc  = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);

	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		     queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq),
		     TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages);

	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->rx_ring = (uint64_t *)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);

	/* Link the RX chain pages: the next-page BD of each page points to
	 * the physical address of the following page (wrapping to the first).
	 */
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}

	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Initialize software ring entries */
	rxq->rx_mbuf_alloc = 0;
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = bnx2x_rxmbuf_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] = mbuf->buf_physaddr;
		rxq->rx_mbuf_alloc++;
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = idx;

	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ alloc failed");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->cq_ring = (union eth_rx_cqe *)dma->addr;

	/* Link the CQ chain pages. */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_head = 0;
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues)
		sc->rx_queues = dev->data->rx_queues;

	return 0;
}

static void
bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < tx_queue->nb_tx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void
bnx2x_dev_tx_queue_release(void *txq)
{
	bnx2x_tx_queue_release(txq);
}
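
/*
 * Burst transmit: hand packets to bnx2x_tx_encap() in chunks of at most
 * RTE_PMD_BNX2X_TX_MAX_BURST, update the fastpath status block index and
 * reclaim completed descriptors once the free threshold is crossed.
 * Returns the number of packets actually queued.
 */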

static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_tx_queue *txq;
	struct bnx2x_softc *sc;
	struct bnx2x_fastpath *fp;
	uint32_t burst, nb_tx;
	struct rte_mbuf **m = tx_pkts;
	int ret;

	txq = p_txq;
	sc = txq->sc;
	fp = &sc->fp[txq->queue_id];
	nb_tx = nb_pkts;

	do {
		burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
		ret = bnx2x_tx_encap(txq, m, burst);
		if (unlikely(ret))
			PMD_TX_LOG(ERR, "tx_encap failed!");
		bnx2x_update_fp_sb_idx(fp);
		/* Reclaim completed BDs once enough are outstanding. */
		if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh)
			bnx2x_txeof(sc, fp);
		if (unlikely(ret == ENOMEM))
			break;
		m += burst;
		nb_pkts -= burst;
	} while (nb_pkts);

	return nb_tx - nb_pkts;
}
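
/*
 * Set up one TX queue: allocate the queue structure, the hardware BD ring
 * and the software mbuf ring, then chain the BD pages together.
 */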

int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;

	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc  = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, tx_pages=%u",
		     queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq),
		     TOTAL_TX_BD(txq), txq->nb_tx_pages);

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
	txq->tx_ring = (union eth_tx_bd_types *)tz->addr;
	memset(txq->tx_ring, 0, tsize);

	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */

	/* Link the TX chain pages: the next BD of each page points to the
	 * physical address of the following page (wrapping to the first).
	 */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, "link tx page %lu",
		   (TOTAL_TX_BD_PER_PAGE * i - 1)); */
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues)
		sc->tx_queues = dev->data->tx_queues;

	return 0;
}
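
/*
 * Publish the new RX BD and CQ producer values to the chip by writing the
 * ustorm producer block for this fastpath in a single register write.
 */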

static inline void
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		       uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	union ustorm_eth_rx_producers rx_prods;

	rx_prods.prod.bd_prod  = rx_bd_prod;
	rx_prods.prod.cqe_prod = rx_cq_prod;

	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
}
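
/*
 * Burst receive: walk the completion queue until it is empty or nb_pkts
 * mbufs have been returned. Each received buffer is handed to the caller
 * and immediately replaced in the BD ring with a freshly allocated mbuf.
 */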

static uint16_t
bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_rx_queue *rxq = p_rxq;
	struct bnx2x_softc *sc = rxq->sc;
	struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
	uint32_t nb_rx = 0;
	uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
	uint16_t bd_cons, bd_prod;
	uint16_t rx_pref;
	uint16_t len, pad;
	struct rte_mbuf *new_mb;
	struct eth_fast_path_rx_cqe *cqe_fp;
	struct rte_mbuf *rx_mb = NULL;

	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
	/* Skip the "next page" element at the end of each RCQ page. */
	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
	    USABLE_RCQ_ENTRIES_PER_PAGE) {
		++hw_cq_cons;
	}

	bd_cons = rxq->rx_bd_head;
	bd_prod = rxq->rx_bd_tail;
	sw_cq_cons = rxq->rx_cq_head;
	sw_cq_prod = rxq->rx_cq_tail;

	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {

		bd_prod &= MAX_RX_BD(rxq);
		bd_cons &= MAX_RX_BD(rxq);

		cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;

		if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags &
					   ETH_FAST_PATH_RX_CQE_TYPE))) {
			PMD_RX_LOG(ERR, "slowpath event during traffic processing");
			break;
		}

		if (unlikely(cqe_fp->type_error_flags &
			     ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
			PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
				   cqe_fp->type_error_flags, sw_cq_cons);
			goto next_rx;
		}

		len = cqe_fp->pkt_len_or_gro_seg_len;
		pad = cqe_fp->placement_offset;

		new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
		if (unlikely(!new_mb)) {
			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
			goto next_rx;
		}

		/* Swap the received mbuf out of the ring for the new one. */
		rx_mb = rxq->sw_ring[bd_cons];
		rxq->sw_ring[bd_cons] = new_mb;
		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;

		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
		rte_prefetch0(rxq->sw_ring[rx_pref]);
		if ((rx_pref & 0x3) == 0) {
			rte_prefetch0(&rxq->rx_ring[rx_pref]);
			rte_prefetch0(&rxq->sw_ring[rx_pref]);
		}

		rx_mb->data_off = pad;
		rx_mb->nb_segs = 1;
		rx_mb->next = NULL;
		rx_mb->pkt_len = rx_mb->data_len = len;
		rx_mb->port = rxq->port_id;
		rx_mb->buf_len = len + pad;
		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
			rx_mb->vlan_tci = cqe_fp->vlan_tag;
			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
		}

		rx_pkts[nb_rx] = rx_mb;
		nb_rx++;

		/* limit spinning on the queue */
		if (unlikely(nb_rx == sc->rx_budget)) {
			PMD_RX_LOG(ERR, "Limit spinning on the queue");
			break;
		}

next_rx:
		bd_cons = NEXT_RX_BD(bd_cons);
		bd_prod = NEXT_RX_BD(bd_prod);
		sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
		sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
	}
	rxq->rx_bd_head = bd_cons;
	rxq->rx_bd_tail = bd_prod;
	rxq->rx_cq_head = sw_cq_cons;
	rxq->rx_cq_tail = sw_cq_prod;

	/* Let the hardware know about the new BD and CQ producers. */
	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);

	return nb_rx;
}

int
bnx2x_dev_rx_init(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = bnx2x_recv_pkts;
	return 0;
}
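
/*
 * Release every RX and TX queue owned by the port and clear the
 * corresponding slots in dev->data so the queues can be set up again.
 */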

void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint8_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			bnx2x_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			bnx2x_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}
}