2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
10 #include "qede_rxtx.h"
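/* Posts one receive buffer: allocate an mbuf from the queue's mempool,
 * record it in the shadow sw_rx_ring at the software producer index and
 * write its DMA address into the next BD taken from rx_bd_ring.
 */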
12 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
14 struct rte_mbuf *new_mb = NULL;
15 struct eth_rx_bd *rx_bd;
17 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
19 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
20 if (unlikely(!new_mb)) {
22 "Failed to allocate rx buffer "
23 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
24 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
25 rte_mempool_avail_count(rxq->mb_pool),
26 rte_mempool_in_use_count(rxq->mb_pool));
29 rxq->sw_rx_ring[idx].mbuf = new_mb;
30 rxq->sw_rx_ring[idx].page_offset = 0;
31 mapping = rte_mbuf_data_dma_addr_default(new_mb);
32 /* Advance PROD and get BD pointer */
33 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
34 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
35 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
41 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
42 uint16_t nb_desc, unsigned int socket_id,
43 __rte_unused const struct rte_eth_rxconf *rx_conf,
44 struct rte_mempool *mp)
46 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
47 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
48 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
49 struct qede_rx_queue *rxq;
50 uint16_t max_rx_pkt_len;
55 PMD_INIT_FUNC_TRACE(edev);
57 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
58 if (!rte_is_power_of_2(nb_desc)) {
59 DP_ERR(edev, "Ring size %u is not power of 2\n",
64 /* Free memory prior to re-allocation if needed... */
65 if (dev->data->rx_queues[queue_idx] != NULL) {
66 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
67 dev->data->rx_queues[queue_idx] = NULL;
70 /* First allocate the rx queue data structure */
71 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
72 RTE_CACHE_LINE_SIZE, socket_id);
75 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
82 rxq->nb_rx_desc = nb_desc;
83 rxq->queue_id = queue_idx;
84 rxq->port_id = dev->data->port_id;
85 max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
86 qdev->mtu = max_rx_pkt_len;
88 /* Fix up RX buffer size */
89 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
90 if ((rxmode->enable_scatter) ||
91 (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
92 if (!dev->data->scattered_rx) {
93 DP_INFO(edev, "Forcing scatter-gather mode\n");
94 dev->data->scattered_rx = 1;
97 if (dev->data->scattered_rx)
98 rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
100 rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
101 /* Align to cache-line size if needed */
102 rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
104 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
105 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
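/* Illustration (assuming a default-sized mempool): with
 * RTE_MBUF_DEFAULT_BUF_SIZE mbufs, bufsz works out to 2048 bytes once the
 * headroom is subtracted, so a 9000-byte max_rx_pkt_len forces scattered_rx
 * while a standard 1500-byte MTU stays within a single BD.
 */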
107 /* Allocate the parallel driver ring for Rx buffers */
108 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
109 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
110 RTE_CACHE_LINE_SIZE, socket_id);
111 if (!rxq->sw_rx_ring) {
112 DP_NOTICE(edev, false,
113 "Unable to alloc memory for sw_rx_ring on socket %u\n",
119 /* Allocate FW Rx ring */
120 rc = qdev->ops->common->chain_alloc(edev,
121 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
122 ECORE_CHAIN_MODE_NEXT_PTR,
123 ECORE_CHAIN_CNT_TYPE_U16,
125 sizeof(struct eth_rx_bd),
129 if (rc != ECORE_SUCCESS) {
130 DP_NOTICE(edev, false,
131 "Unable to alloc memory for rxbd ring on socket %u\n",
133 rte_free(rxq->sw_rx_ring);
138 /* Allocate FW completion ring */
139 rc = qdev->ops->common->chain_alloc(edev,
140 ECORE_CHAIN_USE_TO_CONSUME,
141 ECORE_CHAIN_MODE_PBL,
142 ECORE_CHAIN_CNT_TYPE_U16,
144 sizeof(union eth_rx_cqe),
148 if (rc != ECORE_SUCCESS) {
149 DP_NOTICE(edev, false,
150 "Unable to alloc memory for cqe ring on socket %u\n",
152 /* TBD: Freeing RX BD ring */
153 rte_free(rxq->sw_rx_ring);
158 dev->data->rx_queues[queue_idx] = rxq;
159 qdev->fp_array[queue_idx].rxq = rxq;
161 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
162 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
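/* Rewinds the BD and completion chains and clears the stored HW consumer
 * index so the queue can be restarted cleanly after a stop.
 */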
168 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
169 struct qede_rx_queue *rxq)
171 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
172 ecore_chain_reset(&rxq->rx_bd_ring);
173 ecore_chain_reset(&rxq->rx_comp_ring);
176 *rxq->hw_cons_ptr = 0;
179 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
183 if (rxq->sw_rx_ring) {
184 for (i = 0; i < rxq->nb_rx_desc; i++) {
185 if (rxq->sw_rx_ring[i].mbuf) {
186 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
187 rxq->sw_rx_ring[i].mbuf = NULL;
193 void qede_rx_queue_release(void *rx_queue)
195 struct qede_rx_queue *rxq = rx_queue;
198 qede_rx_queue_release_mbufs(rxq);
199 rte_free(rxq->sw_rx_ring);
204 /* Stops a given RX queue in the HW */
205 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
207 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
208 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
209 struct ecore_hwfn *p_hwfn;
210 struct qede_rx_queue *rxq;
214 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
215 rxq = eth_dev->data->rx_queues[rx_queue_id];
216 hwfn_index = rx_queue_id % edev->num_hwfns;
217 p_hwfn = &edev->hwfns[hwfn_index];
218 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
220 if (rc != ECORE_SUCCESS) {
221 DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
224 qede_rx_queue_release_mbufs(rxq);
225 qede_rx_queue_reset(qdev, rxq);
226 eth_dev->data->rx_queue_state[rx_queue_id] =
227 RTE_ETH_QUEUE_STATE_STOPPED;
228 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
230 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
238 qede_tx_queue_setup(struct rte_eth_dev *dev,
241 unsigned int socket_id,
242 const struct rte_eth_txconf *tx_conf)
244 struct qede_dev *qdev = dev->data->dev_private;
245 struct ecore_dev *edev = &qdev->edev;
246 struct qede_tx_queue *txq;
249 PMD_INIT_FUNC_TRACE(edev);
251 if (!rte_is_power_of_2(nb_desc)) {
252 DP_ERR(edev, "Ring size %u is not power of 2\n",
257 /* Free memory prior to re-allocation if needed... */
258 if (dev->data->tx_queues[queue_idx] != NULL) {
259 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
260 dev->data->tx_queues[queue_idx] = NULL;
263 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
264 RTE_CACHE_LINE_SIZE, socket_id);
268 "Unable to allocate memory for txq on socket %u",
273 txq->nb_tx_desc = nb_desc;
275 txq->port_id = dev->data->port_id;
277 rc = qdev->ops->common->chain_alloc(edev,
278 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
279 ECORE_CHAIN_MODE_PBL,
280 ECORE_CHAIN_CNT_TYPE_U16,
282 sizeof(union eth_tx_bd_types),
285 if (rc != ECORE_SUCCESS) {
287 "Unable to allocate memory for txbd ring on socket %u",
289 qede_tx_queue_release(txq);
293 /* Allocate software ring */
294 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
295 (sizeof(struct qede_tx_entry) *
297 RTE_CACHE_LINE_SIZE, socket_id);
299 if (!txq->sw_tx_ring) {
301 "Unable to allocate memory for txbd ring on socket %u",
303 qede_tx_queue_release(txq);
307 txq->queue_id = queue_idx;
309 txq->nb_tx_avail = txq->nb_tx_desc;
311 txq->tx_free_thresh =
312 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
313 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
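/* qede_xmit_pkts() only reaps completions once nb_tx_avail drops below this
 * threshold, so a higher value means fewer completion scans at the cost of
 * holding transmitted mbufs longer before they are freed.
 */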
315 dev->data->tx_queues[queue_idx] = txq;
316 qdev->fp_array[queue_idx].txq = txq;
319 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
320 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
326 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
327 struct qede_tx_queue *txq)
329 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
330 ecore_chain_reset(&txq->tx_pbl);
333 *txq->hw_cons_ptr = 0;
336 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
340 if (txq->sw_tx_ring) {
341 for (i = 0; i < txq->nb_tx_desc; i++) {
342 if (txq->sw_tx_ring[i].mbuf) {
343 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
344 txq->sw_tx_ring[i].mbuf = NULL;
350 void qede_tx_queue_release(void *tx_queue)
352 struct qede_tx_queue *txq = tx_queue;
355 qede_tx_queue_release_mbufs(txq);
356 rte_free(txq->sw_tx_ring);
361 /* This function allocates fast-path status block memory */
363 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
366 struct ecore_dev *edev = &qdev->edev;
367 struct status_block *sb_virt;
371 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
374 DP_ERR(edev, "Status block allocation failed\n");
378 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
381 DP_ERR(edev, "Status block initialization failed\n");
382 /* TBD: No dma_free_coherent possible */
389 int qede_alloc_fp_resc(struct qede_dev *qdev)
391 struct ecore_dev *edev = &qdev->edev;
392 struct qede_fastpath *fp;
397 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
399 num_sbs = ecore_cxt_get_proto_cid_count
400 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
403 DP_ERR(edev, "No status blocks available\n");
407 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
408 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
410 if (!qdev->fp_array) {
411 DP_ERR(edev, "fp array allocation failed\n");
415 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
416 sizeof(*qdev->fp_array));
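/* Note: rte_calloc() already returns zeroed memory, so this memset is
 * redundant but harmless.
 */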
418 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
419 fp = &qdev->fp_array[sb_idx];
420 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
421 RTE_CACHE_LINE_SIZE);
423 DP_ERR(edev, "FP sb_info allocation fails\n");
426 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
427 DP_ERR(edev, "FP status block allocation fails\n");
430 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
431 fp->sb_info->igu_sb_id);
437 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
439 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
440 __rte_unused struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
441 struct qede_fastpath *fp;
444 PMD_INIT_FUNC_TRACE(edev);
446 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
447 fp = &qdev->fp_array[sb_idx];
448 DP_INFO(edev, "Free sb_info index 0x%x\n",
449 fp->sb_info->igu_sb_id);
451 rte_free(fp->sb_info);
455 rte_free(qdev->fp_array);
456 qdev->fp_array = NULL;
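/* Publishes the current RX BD and CQE producer indices to the device's
 * internal RAM so that firmware can start filling the newly posted buffers.
 */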
460 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
461 struct qede_rx_queue *rxq)
463 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
464 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
465 struct eth_rx_prod_data rx_prods = { 0 };
467 /* Update producers */
468 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
469 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
471 /* Make sure that the BD and SGE data is updated before updating the
472 * producers since FW might read the BD/SGE right after the producer
477 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
478 (uint32_t *)&rx_prods);
480 /* mmiowb is needed to synchronize doorbell writes from more than one
481 * processor. It guarantees that the write arrives to the device before
482 * the napi lock is released and another qede_poll is called (possibly
483 * on another CPU). Without this barrier, the next doorbell can bypass
484 * this doorbell. This is applicable to IA64/Altix systems.
488 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
491 /* Starts a given RX queue in HW */
493 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
495 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
496 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
497 struct ecore_queue_start_common_params params;
498 struct ecore_rxq_start_ret_params ret_params;
499 struct qede_rx_queue *rxq;
500 struct qede_fastpath *fp;
501 struct ecore_hwfn *p_hwfn;
502 dma_addr_t p_phys_table;
508 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
509 fp = &qdev->fp_array[rx_queue_id];
510 rxq = eth_dev->data->rx_queues[rx_queue_id];
511 /* Allocate buffers for the Rx ring */
512 for (j = 0; j < rxq->nb_rx_desc; j++) {
513 rc = qede_alloc_rx_buffer(rxq);
515 DP_ERR(edev, "RX buffer allocation failed"
516 " for rxq = %u\n", rx_queue_id);
520 /* disable interrupts */
521 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
523 memset(&params, 0, sizeof(params));
524 params.queue_id = rx_queue_id;
526 params.sb = fp->sb_info->igu_sb_id;
527 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
528 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
529 params.sb_idx = RX_PI;
530 hwfn_index = rx_queue_id % edev->num_hwfns;
531 p_hwfn = &edev->hwfns[hwfn_index];
532 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
533 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
534 memset(&ret_params, 0, sizeof(ret_params));
535 rc = ecore_eth_rx_queue_start(p_hwfn,
536 p_hwfn->hw_info.opaque_fid,
537 &params, fp->rxq->rx_buf_size,
538 fp->rxq->rx_bd_ring.p_phys_addr,
539 p_phys_table, page_cnt,
542 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
546 /* Update with the returned parameters */
547 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
548 fp->rxq->handle = ret_params.p_handle;
550 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
551 qede_update_rx_prod(qdev, fp->rxq);
552 eth_dev->data->rx_queue_state[rx_queue_id] =
553 RTE_ETH_QUEUE_STATE_STARTED;
554 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
556 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
564 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
566 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
567 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
568 struct ecore_queue_start_common_params params;
569 struct ecore_txq_start_ret_params ret_params;
570 struct ecore_hwfn *p_hwfn;
571 dma_addr_t p_phys_table;
572 struct qede_tx_queue *txq;
573 struct qede_fastpath *fp;
578 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
579 txq = eth_dev->data->tx_queues[tx_queue_id];
580 fp = &qdev->fp_array[tx_queue_id];
581 memset(&params, 0, sizeof(params));
582 params.queue_id = tx_queue_id;
584 params.sb = fp->sb_info->igu_sb_id;
585 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
586 fp->txq->queue_id, fp->sb_info->igu_sb_id);
587 params.sb_idx = TX_PI(0); /* tc = 0 */
588 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
589 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
590 hwfn_index = tx_queue_id % edev->num_hwfns;
591 p_hwfn = &edev->hwfns[hwfn_index];
592 if (qdev->dev_info.is_legacy)
593 fp->txq->is_legacy = true;
594 rc = ecore_eth_tx_queue_start(p_hwfn,
595 p_hwfn->hw_info.opaque_fid,
597 p_phys_table, page_cnt,
599 if (rc != ECORE_SUCCESS) {
600 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
604 txq->doorbell_addr = ret_params.p_doorbell;
605 txq->handle = ret_params.p_handle;
607 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
608 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
610 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
612 SET_FIELD(txq->tx_db.data.params,
613 ETH_DB_DATA_AGG_VAL_SEL,
614 DQ_XCM_ETH_TX_BD_PROD_CMD);
615 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
616 eth_dev->data->tx_queue_state[tx_queue_id] =
617 RTE_ETH_QUEUE_STATE_STARTED;
618 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
620 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
628 qede_free_tx_pkt(struct qede_tx_queue *txq)
630 struct rte_mbuf *mbuf;
635 mbuf = txq->sw_tx_ring[idx].mbuf;
637 nb_segs = mbuf->nb_segs;
638 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
640 /* It's like consuming rxbuf in recv() */
641 ecore_chain_consume(&txq->tx_pbl);
645 rte_pktmbuf_free(mbuf);
646 txq->sw_tx_ring[idx].mbuf = NULL;
648 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
650 ecore_chain_consume(&txq->tx_pbl);
656 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
657 struct qede_tx_queue *txq)
660 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
664 rte_compiler_barrier();
665 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
666 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
667 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
668 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
669 abs(hw_bd_cons - sw_tx_cons));
671 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
672 qede_free_tx_pkt(txq);
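/* qede_drain_txq() polls completions until the software producer and
 * consumer meet; if the queue does not drain in time it optionally asks the
 * MCP to drain once and retries before giving up.
 */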
676 static int qede_drain_txq(struct qede_dev *qdev,
677 struct qede_tx_queue *txq, bool allow_drain)
679 struct ecore_dev *edev = &qdev->edev;
682 while (txq->sw_tx_cons != txq->sw_tx_prod) {
683 qede_process_tx_compl(edev, txq);
686 DP_ERR(edev, "Tx queue[%u] is stuck, "
687 "requesting MCP to drain\n",
689 rc = qdev->ops->common->drain(edev);
692 return qede_drain_txq(qdev, txq, false);
694 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
695 "PROD=%d, CONS=%d\n",
696 txq->queue_id, txq->sw_tx_prod,
702 rte_compiler_barrier();
705 /* FW finished processing, wait for HW to transmit all tx packets */
712 /* Stops a given TX queue in the HW */
713 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
715 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
716 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
717 struct ecore_hwfn *p_hwfn;
718 struct qede_tx_queue *txq;
722 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
723 txq = eth_dev->data->tx_queues[tx_queue_id];
725 if (qede_drain_txq(qdev, txq, true))
726 return -1; /* For the lack of retcodes */
728 hwfn_index = tx_queue_id % edev->num_hwfns;
729 p_hwfn = &edev->hwfns[hwfn_index];
730 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
731 if (rc != ECORE_SUCCESS) {
732 DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
735 qede_tx_queue_release_mbufs(txq);
736 qede_tx_queue_reset(qdev, txq);
737 eth_dev->data->tx_queue_state[tx_queue_id] =
738 RTE_ETH_QUEUE_STATE_STOPPED;
739 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
741 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
748 int qede_start_queues(struct rte_eth_dev *eth_dev)
750 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
755 rc = qede_rx_queue_start(eth_dev, id);
756 if (rc != ECORE_SUCCESS)
761 rc = qede_tx_queue_start(eth_dev, id);
762 if (rc != ECORE_SUCCESS)
769 void qede_stop_queues(struct rte_eth_dev *eth_dev)
771 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
774 /* Stopping RX/TX queues */
776 qede_tx_queue_stop(eth_dev, id);
780 qede_rx_queue_stop(eth_dev, id);
784 static bool qede_tunn_exist(uint16_t flag)
786 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
787 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
791 * qede_check_tunn_csum_l4:
793 * 1 : If L4 csum is enabled AND if the validation has failed.
796 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
798 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
799 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
800 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
801 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
806 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
808 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
809 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
810 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
811 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
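/* Like the tunnel variant above, this returns 1 only when the parser
 * actually computed the L4 checksum and flagged it bad; when the checksum
 * was not calculated the helper reports no error.
 */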
816 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
821 static const uint32_t
822 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
823 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
824 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
825 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
826 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
827 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
828 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
831 /* Bits (0..3) provides L3/L4 protocol type */
832 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
833 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
834 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
835 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
837 if (val < QEDE_PKT_TYPE_MAX)
838 return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
840 return RTE_PTYPE_UNKNOWN;
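/* Illustration (assuming the usual field encoding): an IPv4/TCP frame sets
 * the L3TYPE and L4PROTOCOL bits so that val selects QEDE_PKT_TYPE_IPV4_TCP,
 * i.e. RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 */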
843 static inline uint8_t
844 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
851 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
852 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
855 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
856 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
857 ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
858 sizeof(struct ether_hdr));
859 pkt_csum = ip->hdr_checksum;
860 ip->hdr_checksum = 0;
861 calc_csum = rte_ipv4_cksum(ip);
862 ip->hdr_checksum = pkt_csum;
863 return (calc_csum != pkt_csum);
864 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
871 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
873 ecore_chain_consume(&rxq->rx_bd_ring);
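/* Re-posts an already-mapped buffer: copy the consumer-side sw_rx_ring entry
 * into the producer slot and point a fresh BD at the same DMA address, so a
 * dropped packet does not cost a new mbuf allocation.
 */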
878 qede_reuse_page(__rte_unused struct qede_dev *qdev,
879 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
881 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
882 uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
883 struct qede_rx_entry *curr_prod;
884 dma_addr_t new_mapping;
886 curr_prod = &rxq->sw_rx_ring[idx];
887 *curr_prod = *curr_cons;
889 new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
890 curr_prod->page_offset;
892 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
893 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
899 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
900 struct qede_dev *qdev, uint8_t count)
902 struct qede_rx_entry *curr_cons;
904 for (; count > 0; count--) {
905 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
906 qede_reuse_page(qdev, rxq, curr_cons);
907 qede_rx_bd_ring_consume(rxq);
912 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
913 struct qede_rx_queue *rxq,
914 uint8_t agg_index, uint16_t len)
916 struct qede_agg_info *tpa_info;
917 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
920 /* Under certain conditions it is possible that the FW may not consume
921 * an additional or new BD, so the decision to consume the BD must be
922 * made based on len_list[0].
924 if (rte_le_to_cpu_16(len)) {
925 tpa_info = &rxq->tpa_info[agg_index];
926 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
927 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
929 curr_frag->nb_segs = 1;
930 curr_frag->pkt_len = rte_le_to_cpu_16(len);
931 curr_frag->data_len = curr_frag->pkt_len;
932 tpa_info->tpa_tail->next = curr_frag;
933 tpa_info->tpa_tail = curr_frag;
934 qede_rx_bd_ring_consume(rxq);
935 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
936 PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
937 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
938 rxq->rx_alloc_errors++;
944 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
945 struct qede_rx_queue *rxq,
946 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
948 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
949 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
950 /* only len_list[0] will have value */
951 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
956 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
957 struct qede_rx_queue *rxq,
958 struct eth_fast_path_rx_tpa_end_cqe *cqe)
960 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
962 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
964 /* Update total length and frags based on end TPA */
965 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
966 /* TODO: Add Sanity Checks */
967 rx_mb->nb_segs = cqe->num_of_bds;
968 rx_mb->pkt_len = cqe->total_packet_len;
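/* The aggregated chain itself is not returned here; qede_recv_pkts() picks
 * up tpa_info[agg_index].tpa_head when it handles the TPA_END CQE.
 */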
970 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
971 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
972 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
976 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
981 static const uint32_t
982 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
983 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
984 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
985 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
986 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
987 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
988 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
989 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
990 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
991 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
992 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
993 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
994 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
995 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
996 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
997 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
998 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
999 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1000 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1001 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1002 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1003 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1004 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1005 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1006 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1007 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1008 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1009 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1010 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1011 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1012 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1013 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1014 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1015 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1016 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1017 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1018 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1019 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1020 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1021 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1022 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1025 /* Cover bits[4-0] to include tunn_type and next protocol */
1026 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1027 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1028 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1029 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1031 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1032 return ptype_tunn_lkup_tbl[val];
1034 return RTE_PTYPE_UNKNOWN;
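/* Walks the remaining BDs of a jumbo frame: for each of num_segs segments,
 * take the corresponding mbuf from the shadow ring, consume its BD and chain
 * it onto rx_mb until pkt_len is exhausted.
 */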
1038 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1039 uint8_t num_segs, uint16_t pkt_len)
1041 struct qede_rx_queue *rxq = p_rxq;
1042 struct qede_dev *qdev = rxq->qdev;
1043 register struct rte_mbuf *seg1 = NULL;
1044 register struct rte_mbuf *seg2 = NULL;
1045 uint16_t sw_rx_index;
1050 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1052 if (unlikely(!cur_size)) {
1053 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1054 " left for mapping jumbo", num_segs);
1055 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1058 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1059 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1060 qede_rx_bd_ring_consume(rxq);
1061 pkt_len -= cur_size;
1062 seg2->data_len = cur_size;
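/* Receive burst: walk the completion ring between the software consumer and
 * the HW consumer held in the status block, turn each regular or TPA CQE
 * into an mbuf (or mbuf chain), replenish the BD ring and finally publish
 * the new producers through qede_update_rx_prod().
 */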
1073 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1075 struct qede_rx_queue *rxq = p_rxq;
1076 struct qede_dev *qdev = rxq->qdev;
1077 struct ecore_dev *edev = &qdev->edev;
1078 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1079 uint16_t rx_pkt = 0;
1080 union eth_rx_cqe *cqe;
1081 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1082 register struct rte_mbuf *rx_mb = NULL;
1083 register struct rte_mbuf *seg1 = NULL;
1084 enum eth_rx_cqe_type cqe_type;
1085 uint16_t pkt_len = 0; /* Sum of all BD segments */
1086 uint16_t len; /* Length of first BD */
1087 uint8_t num_segs = 1;
1088 uint16_t preload_idx;
1089 uint16_t parse_flag;
1090 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1091 uint8_t bitfield_val;
1092 enum rss_hash_type htype;
1094 uint8_t tunn_parse_flag;
1096 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1098 uint32_t packet_type;
1101 uint8_t offset, tpa_agg_idx, flags;
1102 struct qede_agg_info *tpa_info = NULL;
1105 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1106 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1110 if (hw_comp_cons == sw_comp_cons)
1113 while (sw_comp_cons != hw_comp_cons) {
1115 packet_type = RTE_PTYPE_UNKNOWN;
1117 tpa_start_flg = false;
1120 /* Get the CQE from the completion ring */
1122 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1123 cqe_type = cqe->fast_path_regular.type;
1124 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1127 case ETH_RX_CQE_TYPE_REGULAR:
1128 fp_cqe = &cqe->fast_path_regular;
1130 case ETH_RX_CQE_TYPE_TPA_START:
1131 cqe_start_tpa = &cqe->fast_path_tpa_start;
1132 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1133 tpa_start_flg = true;
1134 /* Mark it as LRO packet */
1135 ol_flags |= PKT_RX_LRO;
1136 /* In split mode, seg_len is the same as len_on_first_bd
1137 * and ext_bd_len_list will be empty since there are
1138 * no additional buffers
1140 PMD_RX_LOG(INFO, rxq,
1141 "TPA start[%d] - len_on_first_bd %d header %d"
1142 " [bd_list[0] %d], [seg_len %d]\n",
1143 cqe_start_tpa->tpa_agg_index,
1144 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1145 cqe_start_tpa->header_len,
1146 rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1147 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1150 case ETH_RX_CQE_TYPE_TPA_CONT:
1151 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1152 &cqe->fast_path_tpa_cont);
1154 case ETH_RX_CQE_TYPE_TPA_END:
1155 qede_rx_process_tpa_end_cqe(qdev, rxq,
1156 &cqe->fast_path_tpa_end);
1157 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1158 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1159 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1161 case ETH_RX_CQE_TYPE_SLOW_PATH:
1162 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1163 ecore_eth_cqe_completion(
1164 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1165 (struct eth_slow_path_rx_cqe *)cqe);
1171 /* Get the data from the SW ring */
1172 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1173 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1174 assert(rx_mb != NULL);
1176 /* Handle regular CQE or TPA start CQE */
1177 if (!tpa_start_flg) {
1178 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1179 offset = fp_cqe->placement_offset;
1180 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1181 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1182 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1183 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1184 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1185 bitfield_val = fp_cqe->bitfields;
1186 htype = (uint8_t)GET_FIELD(bitfield_val,
1187 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
1191 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1192 offset = cqe_start_tpa->placement_offset;
1193 /* seg_len = len_on_first_bd */
1194 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1195 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1196 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1197 bitfield_val = cqe_start_tpa->bitfields;
1198 htype = (uint8_t)GET_FIELD(bitfield_val,
1199 ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
1201 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1203 if (qede_tunn_exist(parse_flag)) {
1204 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1205 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1206 PMD_RX_LOG(ERR, rxq,
1207 "L4 csum failed, flags = 0x%x\n",
1209 rxq->rx_hw_errors++;
1210 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1212 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1215 cqe_start_tpa->tunnel_pars_flags.flags;
1217 flags = fp_cqe->tunnel_pars_flags.flags;
1218 tunn_parse_flag = flags;
1220 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1223 PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
1224 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1225 PMD_RX_LOG(ERR, rxq,
1226 "L4 csum failed, flags = 0x%x\n",
1228 rxq->rx_hw_errors++;
1229 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1231 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1233 if (unlikely(qede_check_notunn_csum_l3(rx_mb,
1235 PMD_RX_LOG(ERR, rxq,
1236 "IP csum failed, flags = 0x%x\n",
1238 rxq->rx_hw_errors++;
1239 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1241 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1243 qede_rx_cqe_to_pkt_type(parse_flag);
1247 if (CQE_HAS_VLAN(parse_flag)) {
1248 ol_flags |= PKT_RX_VLAN_PKT;
1249 if (qdev->vlan_strip_flg) {
1250 ol_flags |= PKT_RX_VLAN_STRIPPED;
1251 rx_mb->vlan_tci = vlan_tci;
1254 if (CQE_HAS_OUTER_VLAN(parse_flag)) {
1255 ol_flags |= PKT_RX_QINQ_PKT;
1256 if (qdev->vlan_strip_flg) {
1257 rx_mb->vlan_tci = vlan_tci;
1258 ol_flags |= PKT_RX_QINQ_STRIPPED;
1260 rx_mb->vlan_tci_outer = 0;
1263 if (qdev->rss_enable) {
1264 ol_flags |= PKT_RX_RSS_HASH;
1265 rx_mb->hash.rss = rss_hash;
1268 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1269 PMD_RX_LOG(ERR, rxq,
1270 "New buffer allocation failed,"
1271 "dropping incoming packet\n");
1272 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
1273 rte_eth_devices[rxq->port_id].
1274 data->rx_mbuf_alloc_failed++;
1275 rxq->rx_alloc_errors++;
1278 qede_rx_bd_ring_consume(rxq);
1280 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1281 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1282 " len on first: %04x Total Len: %04x",
1283 fp_cqe->bd_num, len, pkt_len);
1284 num_segs = fp_cqe->bd_num - 1;
1286 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1289 for (j = 0; j < num_segs; j++) {
1290 if (qede_alloc_rx_buffer(rxq)) {
1291 PMD_RX_LOG(ERR, rxq,
1292 "Buffer allocation failed");
1293 rte_eth_devices[rxq->port_id].
1294 data->rx_mbuf_alloc_failed++;
1295 rxq->rx_alloc_errors++;
1301 rxq->rx_segs++; /* for the first segment */
1303 /* Prefetch next mbuf while processing current one. */
1304 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1305 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1307 /* Update rest of the MBUF fields */
1308 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1309 rx_mb->port = rxq->port_id;
1310 rx_mb->ol_flags = ol_flags;
1311 rx_mb->data_len = len;
1312 rx_mb->packet_type = packet_type;
1313 PMD_RX_LOG(INFO, rxq,
1314 "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
1315 " ol_flags 0x%04lx\n",
1316 packet_type, len, htype, rx_mb->hash.rss,
1317 (unsigned long)ol_flags);
1318 if (!tpa_start_flg) {
1319 rx_mb->nb_segs = fp_cqe->bd_num;
1320 rx_mb->pkt_len = pkt_len;
1322 /* store ref to the updated mbuf */
1323 tpa_info->tpa_head = rx_mb;
1324 tpa_info->tpa_tail = tpa_info->tpa_head;
1326 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1328 if (!tpa_start_flg) {
1329 rx_pkts[rx_pkt] = rx_mb;
1333 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1334 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1335 if (rx_pkt == nb_pkts) {
1336 PMD_RX_LOG(DEBUG, rxq,
1337 "Budget reached nb_pkts=%u received=%u",
1343 qede_update_rx_prod(qdev, rxq);
1345 rxq->rcv_pkts += rx_pkt;
1347 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1353 /* Populate scatter gather buffer descriptor fields */
1354 static inline uint8_t
1355 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1356 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
1358 struct qede_tx_queue *txq = p_txq;
1359 struct eth_tx_bd *tx_bd = NULL;
1361 uint8_t nb_segs = 0;
1363 /* Check for scattered buffers */
1367 *bd2 = (struct eth_tx_2nd_bd *)
1368 ecore_chain_produce(&txq->tx_pbl);
1369 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1372 mapping = rte_mbuf_data_dma_addr(m_seg);
1373 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1374 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1375 } else if (nb_segs == 1) {
1377 *bd3 = (struct eth_tx_3rd_bd *)
1378 ecore_chain_produce(&txq->tx_pbl);
1379 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1382 mapping = rte_mbuf_data_dma_addr(m_seg);
1383 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1384 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1386 tx_bd = (struct eth_tx_bd *)
1387 ecore_chain_produce(&txq->tx_pbl);
1388 memset(tx_bd, 0, sizeof(*tx_bd));
1390 mapping = rte_mbuf_data_dma_addr(m_seg);
1391 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1392 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1394 m_seg = m_seg->next;
1397 /* Return total scattered buffers */
1401 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1403 print_tx_bd_info(struct qede_tx_queue *txq,
1404 struct eth_tx_1st_bd *bd1,
1405 struct eth_tx_2nd_bd *bd2,
1406 struct eth_tx_3rd_bd *bd3,
1407 uint64_t tx_ol_flags)
1409 char ol_buf[256] = { 0 }; /* for verbose prints */
1412 PMD_TX_LOG(INFO, txq,
1413 "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
1414 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1415 bd1->data.bd_flags.bitfields,
1416 rte_cpu_to_le_16(bd1->data.bitfields));
1418 PMD_TX_LOG(INFO, txq,
1419 "BD2: nbytes=%u bf=%04x\n",
1420 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
1422 PMD_TX_LOG(INFO, txq,
1423 "BD3: nbytes=%u bf=%04x mss=%u\n",
1424 rte_cpu_to_le_16(bd3->nbytes),
1425 rte_cpu_to_le_16(bd3->data.bitfields),
1426 rte_cpu_to_le_16(bd3->data.lso_mss));
1428 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1429 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1433 /* TX prepare to check that packets meet TX conditions */
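/* Typical usage (illustrative):
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 * A return value smaller than nb_pkts means pkts[nb_prep] was rejected and
 * rte_errno holds the reason.
 */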
1435 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1436 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1439 struct qede_tx_queue *txq = p_txq;
1441 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1448 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1452 for (i = 0; i < nb_pkts; i++) {
1454 ol_flags = m->ol_flags;
1455 if (ol_flags & PKT_TX_TCP_SEG) {
1456 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1457 rte_errno = -EINVAL;
1460 /* TBD: confirm it's ~9700B for both? */
1461 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1462 rte_errno = -EINVAL;
1466 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1467 rte_errno = -EINVAL;
1471 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1472 rte_errno = -ENOTSUP;
1476 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1477 ret = rte_validate_tx_offload(m);
1485 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1486 if (unlikely(i != nb_pkts))
1487 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
1493 #define MPLSINUDP_HDR_SIZE (12)
1495 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1497 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
1498 struct qede_tx_queue *txq)
1500 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
1501 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
1502 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
1503 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
1504 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
1505 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
1506 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
1507 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
1508 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
1509 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
1510 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
1515 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1517 struct qede_tx_queue *txq = p_txq;
1518 struct qede_dev *qdev = txq->qdev;
1519 struct ecore_dev *edev = &qdev->edev;
1520 struct rte_mbuf *mbuf;
1521 struct rte_mbuf *m_seg = NULL;
1522 uint16_t nb_tx_pkts;
1526 uint16_t nb_pkt_sent = 0;
1530 __rte_unused bool tunn_flg;
1531 bool tunn_ipv6_ext_flg;
1532 struct eth_tx_1st_bd *bd1;
1533 struct eth_tx_2nd_bd *bd2;
1534 struct eth_tx_3rd_bd *bd3;
1535 uint64_t tx_ol_flags;
1539 uint8_t bd1_bd_flags_bf;
1548 uint8_t tunn_l4_hdr_start_offset;
1549 uint8_t tunn_hdr_size;
1550 uint8_t inner_l2_hdr_size;
1551 uint16_t inner_l4_hdr_offset;
1553 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1554 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
1555 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1556 qede_process_tx_compl(edev, txq);
1559 nb_tx_pkts = nb_pkts;
1560 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1561 while (nb_tx_pkts--) {
1562 /* Init flags/values */
1572 bd1_bd_flags_bf = 0;
1577 mplsoudp_flg = false;
1578 tunn_ipv6_ext_flg = false;
1580 tunn_l4_hdr_start_offset = 0;
1585 /* Check minimum TX BDS availability against available BDs */
1586 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
1589 tx_ol_flags = mbuf->ol_flags;
1590 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1592 /* TX prepare would have already checked supported tunnel Tx
1593 * offloads. Don't rely on pkt_type marked by Rx, instead use
1594 * tx_ol_flags to decide.
1596 if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1597 PKT_TX_TUNNEL_VXLAN) ||
1598 ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1599 PKT_TX_TUNNEL_MPLSINUDP)) {
1600 /* Check against max which is Tunnel IPv6 + ext */
1601 if (unlikely(txq->nb_tx_avail <
1602 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
1605 /* First indicate its a tunnel pkt */
1606 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
1607 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1608 /* Legacy FW had flipped behavior in regard to this bit
1609 * i.e. it needed to be set to prevent FW from touching
1610 * encapsulated packets when it didn't need to.
1612 if (unlikely(txq->is_legacy)) {
1614 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1617 /* Outer IP checksum offload */
1618 if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
1619 PKT_TX_OUTER_IPV4)) {
1621 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
1622 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1626 * Currently, only inner checksum offload in MPLS-in-UDP
1627 * tunnel with one MPLS label is supported. Both outer
1628 * and inner layer lengths need to be provided in
1631 if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1632 PKT_TX_TUNNEL_MPLSINUDP) {
1633 mplsoudp_flg = true;
1634 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1635 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1637 /* Outer L4 offset in two byte words */
1638 tunn_l4_hdr_start_offset =
1639 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
1640 /* Tunnel header size in two byte words */
1641 tunn_hdr_size = (mbuf->outer_l2_len +
1642 mbuf->outer_l3_len +
1643 MPLSINUDP_HDR_SIZE) / 2;
1644 /* Inner L2 header size in two byte words */
1645 inner_l2_hdr_size = (mbuf->l2_len -
1646 MPLSINUDP_HDR_SIZE) / 2;
1647 /* Inner L4 header offset from the beginning
1648 * of inner packet in two byte words
1650 inner_l4_hdr_offset = (mbuf->l2_len -
1651 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
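/* Example (assumed frame): outer_l2_len 14 + outer_l3_len 20 gives a
 * tunn_l4_hdr_start_offset of 17 words; adding the 12-byte MPLSoUDP header
 * makes tunn_hdr_size 23 words, all expressed in two-byte units as the
 * firmware expects.
 */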
1653 /* TODO: There's no DPDK flag to request outer
1654 * L4 checksum offload, so we don't do it.
1655 * bd1_bd_flags_bf |=
1656 * ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
1657 * ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1659 /* Inner L2 size and address type */
1660 bd2_bf1 |= (inner_l2_hdr_size &
1661 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
1662 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
1663 bd2_bf1 |= (UNICAST_ADDRESS &
1664 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
1665 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
1666 /* Treated as IPv6+Ext */
1668 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
1670 /* Mark inner IPv6 if present */
1671 if (tx_ol_flags & PKT_TX_IPV6)
1673 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
1675 /* Inner L4 offsets */
1676 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
1677 (tx_ol_flags & (PKT_TX_UDP_CKSUM |
1678 PKT_TX_TCP_CKSUM))) {
1679 /* Determines if BD3 is needed */
1680 tunn_ipv6_ext_flg = true;
1681 if ((tx_ol_flags & PKT_TX_L4_MASK) ==
1684 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
1687 /* TODO other pseudo checksum modes are
1691 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
1692 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
1693 bd2_bf2 |= (inner_l4_hdr_offset &
1694 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
1695 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
1697 } /* End MPLSoUDP */
1698 } /* End Tunnel handling */
1700 if (tx_ol_flags & PKT_TX_TCP_SEG) {
1702 if (unlikely(txq->nb_tx_avail <
1703 ETH_TX_MIN_BDS_PER_LSO_PKT))
1705 /* For LSO, packet header and payload must reside on
1706 * buffers pointed to by different BDs. Using BD1 for HDR
1707 * and BD2 onwards for data.
1709 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
1710 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
1712 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1713 /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
1715 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1716 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
1717 /* Using one header BD */
1718 bd3_bf |= rte_cpu_to_le_16(1 <<
1719 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1721 if (unlikely(txq->nb_tx_avail <
1722 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
1725 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
1726 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1729 /* Descriptor based VLAN insertion */
1730 if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1731 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
1733 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1736 /* Offload the IP checksum in the hardware */
1737 if (tx_ol_flags & PKT_TX_IP_CKSUM)
1739 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1741 /* L4 checksum offload (tcp or udp) */
1742 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
1743 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
1745 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1748 /* Fill the entry in the SW ring and the BDs in the FW ring */
1750 txq->sw_tx_ring[idx].mbuf = mbuf;
1753 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
1754 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
1757 /* Map MBUF linear data for DMA and set in the BD1 */
1758 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
1760 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
1761 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
1762 bd1->data.vlan = vlan;
1764 if (lso_flg || mplsoudp_flg) {
1765 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
1767 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
1771 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
1774 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
1775 rte_mbuf_data_dma_addr(mbuf)),
1776 mbuf->data_len - hdr_size);
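/* For LSO, BD1 now covers only the hdr_size header bytes while BD2 points
 * hdr_size further into the same segment for the remaining payload, keeping
 * header and data in separate BDs as required.
 */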
1777 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
1779 bd2->data.bitfields2 =
1780 rte_cpu_to_le_16(bd2_bf2);
1782 bd2->data.tunn_ip_size =
1783 rte_cpu_to_le_16(mbuf->outer_l3_len);
1786 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
1787 bd3 = (struct eth_tx_3rd_bd *)
1788 ecore_chain_produce(&txq->tx_pbl);
1789 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
1791 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
1793 bd3->data.lso_mss = mss;
1795 bd3->data.tunn_l4_hdr_start_offset_w =
1796 tunn_l4_hdr_start_offset;
1797 bd3->data.tunn_hdr_size_w =
1803 /* Handle fragmented MBUF */
1805 /* Encode scatter gather buffer descriptors if required */
1806 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
1807 bd1->data.nbds = nbds + nb_frags;
1808 txq->nb_tx_avail -= bd1->data.nbds;
1810 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
1812 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1813 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1814 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
1815 PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
1821 /* Write value of prod idx into bd_prod */
1822 txq->tx_db.data.bd_prod = bd_prod;
1824 rte_compiler_barrier();
1825 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
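/* A single doorbell write after the loop publishes the final producer index
 * for the whole burst instead of ringing once per packet.
 */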
1828 /* Check again for Tx completions */
1829 qede_process_tx_compl(edev, txq);
1831 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
1832 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
1838 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
1839 __rte_unused struct rte_mbuf **pkts,
1840 __rte_unused uint16_t nb_pkts)