2 * Copyright (c) 2016 - 2018 Cavium Inc.
6 * See LICENSE.qede_pmd for copyright and licensing details.
10 #include "qede_rxtx.h"
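/* Allocate a fresh mbuf from the queue's mempool, park it in the software Rx
 * ring at the current producer index, and publish its DMA address to the
 * hardware by filling the next Rx BD in the ecore chain.
 */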
12 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
14 struct rte_mbuf *new_mb = NULL;
15 struct eth_rx_bd *rx_bd;
17 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
19 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
20 if (unlikely(!new_mb)) {
22 "Failed to allocate rx buffer "
23 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
24 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
25 rte_mempool_avail_count(rxq->mb_pool),
26 rte_mempool_in_use_count(rxq->mb_pool));
29 rxq->sw_rx_ring[idx].mbuf = new_mb;
30 rxq->sw_rx_ring[idx].page_offset = 0;
31 mapping = rte_mbuf_data_iova_default(new_mb);
32 /* Advance PROD and get BD pointer */
33 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
34 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
35 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
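/* Rx queue setup: validate the (power-of-2) descriptor count, size the Rx
 * buffers (forcing scatter-gather when the configured max frame cannot fit in
 * a single mbuf), and allocate the software ring plus the firmware BD and
 * completion (CQE) chains.
 */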
41 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
42 uint16_t nb_desc, unsigned int socket_id,
43 __rte_unused const struct rte_eth_rxconf *rx_conf,
44 struct rte_mempool *mp)
46 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
47 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
48 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
49 struct qede_rx_queue *rxq;
50 uint16_t max_rx_pkt_len;
55 PMD_INIT_FUNC_TRACE(edev);
57 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
58 if (!rte_is_power_of_2(nb_desc)) {
59 DP_ERR(edev, "Ring size %u is not a power of 2\n",
64 /* Free memory prior to re-allocation if needed... */
65 if (dev->data->rx_queues[queue_idx] != NULL) {
66 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
67 dev->data->rx_queues[queue_idx] = NULL;
70 /* First allocate the rx queue data structure */
71 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
72 RTE_CACHE_LINE_SIZE, socket_id);
75 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
82 rxq->nb_rx_desc = nb_desc;
83 rxq->queue_id = queue_idx;
84 rxq->port_id = dev->data->port_id;
86 max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
88 /* Fix up RX buffer size */
89 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
90 if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
91 (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
92 if (!dev->data->scattered_rx) {
93 DP_INFO(edev, "Forcing scatter-gather mode\n");
94 dev->data->scattered_rx = 1;
98 if (dev->data->scattered_rx)
99 rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
100 ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
102 rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
103 /* Align to cache-line size if needed */
104 rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
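/* Example with hypothetical numbers: a 2048-byte mbuf data room minus 128
 * bytes of headroom gives bufsz = 1920; a 9000-byte max_rx_pkt_len then forces
 * scattered_rx, and rx_buf_size becomes bufsz plus Ethernet header/CRC and
 * QEDE_ETH_OVERHEAD, rounded up to a cache line.
 */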
106 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
107 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
109 /* Allocate the parallel driver ring for Rx buffers */
110 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
111 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
112 RTE_CACHE_LINE_SIZE, socket_id);
113 if (!rxq->sw_rx_ring) {
114 DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
115 " socket %u\n", socket_id);
120 /* Allocate FW Rx ring */
121 rc = qdev->ops->common->chain_alloc(edev,
122 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
123 ECORE_CHAIN_MODE_NEXT_PTR,
124 ECORE_CHAIN_CNT_TYPE_U16,
126 sizeof(struct eth_rx_bd),
130 if (rc != ECORE_SUCCESS) {
131 DP_ERR(edev, "Memory allocation fails for RX BD ring"
132 " on socket %u\n", socket_id);
133 rte_free(rxq->sw_rx_ring);
138 /* Allocate FW completion ring */
139 rc = qdev->ops->common->chain_alloc(edev,
140 ECORE_CHAIN_USE_TO_CONSUME,
141 ECORE_CHAIN_MODE_PBL,
142 ECORE_CHAIN_CNT_TYPE_U16,
144 sizeof(union eth_rx_cqe),
148 if (rc != ECORE_SUCCESS) {
149 DP_ERR(edev, "Memory allocation fails for RX CQE ring"
150 " on socket %u\n", socket_id);
151 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
152 rte_free(rxq->sw_rx_ring);
157 dev->data->rx_queues[queue_idx] = rxq;
158 qdev->fp_array[queue_idx].rxq = rxq;
160 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
161 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
167 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
168 struct qede_rx_queue *rxq)
170 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
171 ecore_chain_reset(&rxq->rx_bd_ring);
172 ecore_chain_reset(&rxq->rx_comp_ring);
175 *rxq->hw_cons_ptr = 0;
178 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
182 if (rxq->sw_rx_ring) {
183 for (i = 0; i < rxq->nb_rx_desc; i++) {
184 if (rxq->sw_rx_ring[i].mbuf) {
185 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
186 rxq->sw_rx_ring[i].mbuf = NULL;
192 void qede_rx_queue_release(void *rx_queue)
194 struct qede_rx_queue *rxq = rx_queue;
195 struct qede_dev *qdev = rxq->qdev;
196 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
198 PMD_INIT_FUNC_TRACE(edev);
201 qede_rx_queue_release_mbufs(rxq);
202 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
203 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
204 rte_free(rxq->sw_rx_ring);
209 /* Stops a given RX queue in the HW */
210 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
212 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
213 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
214 struct ecore_hwfn *p_hwfn;
215 struct qede_rx_queue *rxq;
219 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
220 rxq = eth_dev->data->rx_queues[rx_queue_id];
221 hwfn_index = rx_queue_id % edev->num_hwfns;
222 p_hwfn = &edev->hwfns[hwfn_index];
223 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
225 if (rc != ECORE_SUCCESS) {
226 DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
229 qede_rx_queue_release_mbufs(rxq);
230 qede_rx_queue_reset(qdev, rxq);
231 eth_dev->data->rx_queue_state[rx_queue_id] =
232 RTE_ETH_QUEUE_STATE_STOPPED;
233 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
235 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
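/* Tx queue setup mirrors the Rx path: validate the (power-of-2) ring size,
 * allocate the PBL-mode firmware BD chain and the parallel software ring, and
 * derive tx_free_thresh from tx_conf or the driver default.
 */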
243 qede_tx_queue_setup(struct rte_eth_dev *dev,
246 unsigned int socket_id,
247 const struct rte_eth_txconf *tx_conf)
249 struct qede_dev *qdev = dev->data->dev_private;
250 struct ecore_dev *edev = &qdev->edev;
251 struct qede_tx_queue *txq;
254 PMD_INIT_FUNC_TRACE(edev);
256 if (!rte_is_power_of_2(nb_desc)) {
257 DP_ERR(edev, "Ring size %u is not a power of 2\n",
262 /* Free memory prior to re-allocation if needed... */
263 if (dev->data->tx_queues[queue_idx] != NULL) {
264 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
265 dev->data->tx_queues[queue_idx] = NULL;
268 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
269 RTE_CACHE_LINE_SIZE, socket_id);
273 "Unable to allocate memory for txq on socket %u",
278 txq->nb_tx_desc = nb_desc;
280 txq->port_id = dev->data->port_id;
282 rc = qdev->ops->common->chain_alloc(edev,
283 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
284 ECORE_CHAIN_MODE_PBL,
285 ECORE_CHAIN_CNT_TYPE_U16,
287 sizeof(union eth_tx_bd_types),
290 if (rc != ECORE_SUCCESS) {
292 "Unable to allocate memory for txbd ring on socket %u",
294 qede_tx_queue_release(txq);
298 /* Allocate software ring */
299 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
300 (sizeof(struct qede_tx_entry) *
302 RTE_CACHE_LINE_SIZE, socket_id);
304 if (!txq->sw_tx_ring) {
306 "Unable to allocate memory for txbd ring on socket %u",
308 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
309 qede_tx_queue_release(txq);
313 txq->queue_id = queue_idx;
315 txq->nb_tx_avail = txq->nb_tx_desc;
317 txq->tx_free_thresh =
318 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
319 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
321 dev->data->tx_queues[queue_idx] = txq;
322 qdev->fp_array[queue_idx].txq = txq;
325 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
326 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
332 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
333 struct qede_tx_queue *txq)
335 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
336 ecore_chain_reset(&txq->tx_pbl);
339 *txq->hw_cons_ptr = 0;
342 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
346 if (txq->sw_tx_ring) {
347 for (i = 0; i < txq->nb_tx_desc; i++) {
348 if (txq->sw_tx_ring[i].mbuf) {
349 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
350 txq->sw_tx_ring[i].mbuf = NULL;
356 void qede_tx_queue_release(void *tx_queue)
358 struct qede_tx_queue *txq = tx_queue;
359 struct qede_dev *qdev = txq->qdev;
360 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
362 PMD_INIT_FUNC_TRACE(edev);
365 qede_tx_queue_release_mbufs(txq);
366 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
367 rte_free(txq->sw_tx_ring);
372 /* This function allocates fast-path status block memory */
374 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
377 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
378 struct status_block_e4 *sb_virt;
382 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
383 sizeof(struct status_block_e4));
385 DP_ERR(edev, "Status block allocation failed\n");
388 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
391 DP_ERR(edev, "Status block initialization failed\n");
392 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
393 sizeof(struct status_block_e4));
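/* Allocate per-queue fastpath resources: one status block (with DMA-coherent
 * memory) per Rx/Tx queue, bounded by the number of status blocks the PF/VF
 * reports as available.
 */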
400 int qede_alloc_fp_resc(struct qede_dev *qdev)
402 struct ecore_dev *edev = &qdev->edev;
403 struct qede_fastpath *fp;
408 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
410 num_sbs = ecore_cxt_get_proto_cid_count
411 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
414 DP_ERR(edev, "No status blocks available\n");
418 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
419 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
421 if (!qdev->fp_array) {
422 DP_ERR(edev, "fp array allocation failed\n");
426 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
427 sizeof(*qdev->fp_array));
429 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
430 fp = &qdev->fp_array[sb_idx];
433 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
434 RTE_CACHE_LINE_SIZE);
436 DP_ERR(edev, "FP sb_info allocation fails\n");
439 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
440 DP_ERR(edev, "FP status block allocation fails\n");
443 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
444 fp->sb_info->igu_sb_id);
450 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
452 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
453 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
454 struct qede_fastpath *fp;
458 PMD_INIT_FUNC_TRACE(edev);
460 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
461 fp = &qdev->fp_array[sb_idx];
464 DP_INFO(edev, "Free sb_info index 0x%x\n",
465 fp->sb_info->igu_sb_id);
467 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
468 fp->sb_info->sb_phys,
469 sizeof(struct status_block_e4));
470 rte_free(fp->sb_info);
475 /* Free packet buffers and ring memories */
476 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
477 if (eth_dev->data->rx_queues[i]) {
478 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
479 eth_dev->data->rx_queues[i] = NULL;
483 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
484 if (eth_dev->data->tx_queues[i]) {
485 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
486 eth_dev->data->tx_queues[i] = NULL;
491 rte_free(qdev->fp_array);
492 qdev->fp_array = NULL;
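/* Publish the current BD and CQE producer indices to the device's internal
 * RAM. The write must be ordered after the BD updates themselves (see the
 * barrier comment below) so the NIC never chases a producer value ahead of
 * valid descriptors.
 */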
496 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
497 struct qede_rx_queue *rxq)
499 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
500 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
501 struct eth_rx_prod_data rx_prods = { 0 };
503 /* Update producers */
504 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
505 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
507 /* Make sure that the BD and SGE data is updated before updating the
508 * producers since FW might read the BD/SGE right after the producer
513 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
514 (uint32_t *)&rx_prods);
516 /* mmiowb is needed to synchronize doorbell writes from more than one
517 * processor. It guarantees that the write arrives to the device before
518 * the napi lock is released and another qede_poll is called (possibly
519 * on another CPU). Without this barrier, the next doorbell can bypass
520 * this doorbell. This is applicable to IA64/Altix systems.
524 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
527 /* Starts a given RX queue in HW */
529 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
531 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
532 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
533 struct ecore_queue_start_common_params params;
534 struct ecore_rxq_start_ret_params ret_params;
535 struct qede_rx_queue *rxq;
536 struct qede_fastpath *fp;
537 struct ecore_hwfn *p_hwfn;
538 dma_addr_t p_phys_table;
544 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
545 fp = &qdev->fp_array[rx_queue_id];
546 rxq = eth_dev->data->rx_queues[rx_queue_id];
547 /* Allocate buffers for the Rx ring */
548 for (j = 0; j < rxq->nb_rx_desc; j++) {
549 rc = qede_alloc_rx_buffer(rxq);
551 DP_ERR(edev, "RX buffer allocation failed"
552 " for rxq = %u\n", rx_queue_id);
556 /* disable interrupts */
557 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
559 memset(&params, 0, sizeof(params));
560 params.queue_id = rx_queue_id / edev->num_hwfns;
562 params.stats_id = params.vport_id;
563 params.p_sb = fp->sb_info;
564 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
565 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
566 params.sb_idx = RX_PI;
567 hwfn_index = rx_queue_id % edev->num_hwfns;
568 p_hwfn = &edev->hwfns[hwfn_index];
569 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
570 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
571 memset(&ret_params, 0, sizeof(ret_params));
572 rc = ecore_eth_rx_queue_start(p_hwfn,
573 p_hwfn->hw_info.opaque_fid,
574 &params, fp->rxq->rx_buf_size,
575 fp->rxq->rx_bd_ring.p_phys_addr,
576 p_phys_table, page_cnt,
579 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
583 /* Update with the returned parameters */
584 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
585 fp->rxq->handle = ret_params.p_handle;
587 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
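/* hw_cons_ptr points into the status block; the device advances this
 * producer-index entry as it posts CQEs, and qede_recv_pkts() polls it to
 * learn how far the completion ring may be consumed.
 */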
588 qede_update_rx_prod(qdev, fp->rxq);
589 eth_dev->data->rx_queue_state[rx_queue_id] =
590 RTE_ETH_QUEUE_STATE_STARTED;
591 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
593 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
601 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
603 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
604 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
605 struct ecore_queue_start_common_params params;
606 struct ecore_txq_start_ret_params ret_params;
607 struct ecore_hwfn *p_hwfn;
608 dma_addr_t p_phys_table;
609 struct qede_tx_queue *txq;
610 struct qede_fastpath *fp;
615 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
616 txq = eth_dev->data->tx_queues[tx_queue_id];
617 fp = &qdev->fp_array[tx_queue_id];
618 memset(&params, 0, sizeof(params));
619 params.queue_id = tx_queue_id / edev->num_hwfns;
621 params.stats_id = params.vport_id;
622 params.p_sb = fp->sb_info;
623 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
624 fp->txq->queue_id, fp->sb_info->igu_sb_id);
625 params.sb_idx = TX_PI(0); /* tc = 0 */
626 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
627 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
628 hwfn_index = tx_queue_id % edev->num_hwfns;
629 p_hwfn = &edev->hwfns[hwfn_index];
630 if (qdev->dev_info.is_legacy)
631 fp->txq->is_legacy = true;
632 rc = ecore_eth_tx_queue_start(p_hwfn,
633 p_hwfn->hw_info.opaque_fid,
635 p_phys_table, page_cnt,
637 if (rc != ECORE_SUCCESS) {
638 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
642 txq->doorbell_addr = ret_params.p_doorbell;
643 txq->handle = ret_params.p_handle;
645 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
646 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
648 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
650 SET_FIELD(txq->tx_db.data.params,
651 ETH_DB_DATA_AGG_VAL_SEL,
652 DQ_XCM_ETH_TX_BD_PROD_CMD);
653 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
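/* The doorbell data template assembled above is reused by qede_xmit_pkts();
 * only bd_prod changes per burst before the raw value is written to
 * txq->doorbell_addr.
 */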
654 eth_dev->data->tx_queue_state[tx_queue_id] =
655 RTE_ETH_QUEUE_STATE_STARTED;
656 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
658 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
666 qede_free_tx_pkt(struct qede_tx_queue *txq)
668 struct rte_mbuf *mbuf;
673 mbuf = txq->sw_tx_ring[idx].mbuf;
675 nb_segs = mbuf->nb_segs;
676 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
678 /* It's like consuming rxbuf in recv() */
679 ecore_chain_consume(&txq->tx_pbl);
683 rte_pktmbuf_free(mbuf);
684 txq->sw_tx_ring[idx].mbuf = NULL;
686 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
688 ecore_chain_consume(&txq->tx_pbl);
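/* Free completed Tx packets: walk the PBL from the software consumer up to the
 * hardware consumer index published in the status block, releasing one
 * transmitted mbuf (and its trailing BDs) per iteration.
 */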
694 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
695 struct qede_tx_queue *txq)
698 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
702 rte_compiler_barrier();
703 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
704 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
705 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
706 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
707 abs(hw_bd_cons - sw_tx_cons));
709 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
710 qede_free_tx_pkt(txq);
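/* Wait for a Tx queue to drain; if it appears stuck, optionally ask the
 * management firmware (MCP) to drain it and retry once before reporting a
 * timeout with the current PROD/CONS indices.
 */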
713 static int qede_drain_txq(struct qede_dev *qdev,
714 struct qede_tx_queue *txq, bool allow_drain)
716 struct ecore_dev *edev = &qdev->edev;
719 while (txq->sw_tx_cons != txq->sw_tx_prod) {
720 qede_process_tx_compl(edev, txq);
723 DP_ERR(edev, "Tx queue[%u] is stuck, "
724 "requesting MCP to drain\n",
726 rc = qdev->ops->common->drain(edev);
729 return qede_drain_txq(qdev, txq, false);
731 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
732 "PROD=%d, CONS=%d\n",
733 txq->queue_id, txq->sw_tx_prod,
739 rte_compiler_barrier();
742 /* FW finished processing, wait for HW to transmit all tx packets */
748 /* Stops a given TX queue in the HW */
749 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
751 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
752 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
753 struct ecore_hwfn *p_hwfn;
754 struct qede_tx_queue *txq;
758 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
759 txq = eth_dev->data->tx_queues[tx_queue_id];
761 if (qede_drain_txq(qdev, txq, true))
762 return -1; /* For the lack of retcodes */
764 hwfn_index = tx_queue_id % edev->num_hwfns;
765 p_hwfn = &edev->hwfns[hwfn_index];
766 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
767 if (rc != ECORE_SUCCESS) {
768 DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
771 qede_tx_queue_release_mbufs(txq);
772 qede_tx_queue_reset(qdev, txq);
773 eth_dev->data->tx_queue_state[tx_queue_id] =
774 RTE_ETH_QUEUE_STATE_STOPPED;
775 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
777 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
784 int qede_start_queues(struct rte_eth_dev *eth_dev)
786 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
791 rc = qede_rx_queue_start(eth_dev, id);
792 if (rc != ECORE_SUCCESS)
797 rc = qede_tx_queue_start(eth_dev, id);
798 if (rc != ECORE_SUCCESS)
805 void qede_stop_queues(struct rte_eth_dev *eth_dev)
807 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
810 /* Stopping RX/TX queues */
812 qede_tx_queue_stop(eth_dev, id);
816 qede_rx_queue_stop(eth_dev, id);
820 static inline bool qede_tunn_exist(uint16_t flag)
822 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
823 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
826 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
828 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
829 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
833 * qede_check_tunn_csum_l4:
835 * 1 : If L4 csum is enabled AND if the validation has failed.
838 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
840 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
841 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
842 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
843 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
848 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
850 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
851 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
852 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
853 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
858 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
859 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
861 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
862 struct ether_hdr *eth_hdr;
863 struct ipv4_hdr *ipv4_hdr;
864 struct ipv6_hdr *ipv6_hdr;
865 struct vlan_hdr *vlan_hdr;
867 bool vlan_tagged = 0;
870 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
871 len = sizeof(struct ether_hdr);
872 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
874 /* Note: Valid only if VLAN stripping is disabled */
875 if (ethertype == ETHER_TYPE_VLAN) {
877 vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
878 len += sizeof(struct vlan_hdr);
879 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
882 if (ethertype == ETHER_TYPE_IPv4) {
883 packet_type |= RTE_PTYPE_L3_IPV4;
884 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
885 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
886 packet_type |= RTE_PTYPE_L4_TCP;
887 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
888 packet_type |= RTE_PTYPE_L4_UDP;
889 } else if (ethertype == ETHER_TYPE_IPv6) {
890 packet_type |= RTE_PTYPE_L3_IPV6;
891 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
892 if (ipv6_hdr->proto == IPPROTO_TCP)
893 packet_type |= RTE_PTYPE_L4_TCP;
894 else if (ipv6_hdr->proto == IPPROTO_UDP)
895 packet_type |= RTE_PTYPE_L4_UDP;
899 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
901 packet_type |= RTE_PTYPE_L2_ETHER;
906 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
911 static const uint32_t
912 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
913 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
914 RTE_PTYPE_INNER_L2_ETHER,
915 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
916 RTE_PTYPE_INNER_L2_ETHER,
917 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
918 RTE_PTYPE_INNER_L4_TCP |
919 RTE_PTYPE_INNER_L2_ETHER,
920 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
921 RTE_PTYPE_INNER_L4_TCP |
922 RTE_PTYPE_INNER_L2_ETHER,
923 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
924 RTE_PTYPE_INNER_L4_UDP |
925 RTE_PTYPE_INNER_L2_ETHER,
926 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
927 RTE_PTYPE_INNER_L4_UDP |
928 RTE_PTYPE_INNER_L2_ETHER,
929 /* Frags with no VLAN */
930 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
931 RTE_PTYPE_INNER_L4_FRAG |
932 RTE_PTYPE_INNER_L2_ETHER,
933 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
934 RTE_PTYPE_INNER_L4_FRAG |
935 RTE_PTYPE_INNER_L2_ETHER,
937 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
938 RTE_PTYPE_INNER_L2_ETHER_VLAN,
939 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
940 RTE_PTYPE_INNER_L2_ETHER_VLAN,
941 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
942 RTE_PTYPE_INNER_L4_TCP |
943 RTE_PTYPE_INNER_L2_ETHER_VLAN,
944 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
945 RTE_PTYPE_INNER_L4_TCP |
946 RTE_PTYPE_INNER_L2_ETHER_VLAN,
947 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
948 RTE_PTYPE_INNER_L4_UDP |
949 RTE_PTYPE_INNER_L2_ETHER_VLAN,
950 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
951 RTE_PTYPE_INNER_L4_UDP |
952 RTE_PTYPE_INNER_L2_ETHER_VLAN,
953 /* Frags with VLAN */
954 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
955 RTE_PTYPE_INNER_L4_FRAG |
956 RTE_PTYPE_INNER_L2_ETHER_VLAN,
957 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
958 RTE_PTYPE_INNER_L4_FRAG |
959 RTE_PTYPE_INNER_L2_ETHER_VLAN,
962 /* Bits (0..3) provides L3/L4 protocol type */
963 /* Bits (4,5) provides frag and VLAN info */
964 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
965 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
966 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
967 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
968 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
969 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
970 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
971 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
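/* Illustrative example (encoding assumed from the masks above): a VLAN-tagged
 * inner IPv4/TCP frame sets the L3TYPE, L4PROTOCOL and TAG8021QEXIST fields,
 * so val indexes the IPV4_TCP_VLAN entry of the lookup table.
 */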
973 if (val < QEDE_PKT_TYPE_MAX)
974 return ptype_lkup_tbl[val];
976 return RTE_PTYPE_UNKNOWN;
979 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
984 static const uint32_t
985 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
986 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
987 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
988 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
991 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
994 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
997 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1000 /* Frags with no VLAN */
1001 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1004 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1008 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1009 RTE_PTYPE_L2_ETHER_VLAN,
1010 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1011 RTE_PTYPE_L2_ETHER_VLAN,
1012 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1014 RTE_PTYPE_L2_ETHER_VLAN,
1015 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1017 RTE_PTYPE_L2_ETHER_VLAN,
1018 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1020 RTE_PTYPE_L2_ETHER_VLAN,
1021 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1023 RTE_PTYPE_L2_ETHER_VLAN,
1024 /* Frags with VLAN */
1025 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1027 RTE_PTYPE_L2_ETHER_VLAN,
1028 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1030 RTE_PTYPE_L2_ETHER_VLAN,
1033 /* Bits (0..3) provides L3/L4 protocol type */
1034 /* Bits (4,5) provides frag and VLAN info */
1035 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1036 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1037 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1038 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1039 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1040 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1041 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1042 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1044 if (val < QEDE_PKT_TYPE_MAX)
1045 return ptype_lkup_tbl[val];
1047 return RTE_PTYPE_UNKNOWN;
1050 static inline uint8_t
1051 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1053 struct ipv4_hdr *ip;
1058 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1059 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1061 if (unlikely(val)) {
1062 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1063 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1064 ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1065 sizeof(struct ether_hdr));
1066 pkt_csum = ip->hdr_checksum;
1067 ip->hdr_checksum = 0;
1068 calc_csum = rte_ipv4_cksum(ip);
1069 ip->hdr_checksum = pkt_csum;
1070 return (calc_csum != pkt_csum);
1071 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1078 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1080 ecore_chain_consume(&rxq->rx_bd_ring);
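/* Re-post the buffer sitting at the software-ring consumer back at the
 * producer slot (same mbuf, same DMA mapping). Used via
 * qede_recycle_rx_bd_ring() when an incoming packet must be dropped because a
 * replacement mbuf could not be allocated.
 */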
1085 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1086 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
1088 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1089 uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1090 struct qede_rx_entry *curr_prod;
1091 dma_addr_t new_mapping;
1093 curr_prod = &rxq->sw_rx_ring[idx];
1094 *curr_prod = *curr_cons;
1096 new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
1097 curr_prod->page_offset;
1099 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1100 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1106 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1107 struct qede_dev *qdev, uint8_t count)
1109 struct qede_rx_entry *curr_cons;
1111 for (; count > 0; count--) {
1112 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1113 qede_reuse_page(qdev, rxq, curr_cons);
1114 qede_rx_bd_ring_consume(rxq);
1119 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1120 struct qede_rx_queue *rxq,
1121 uint8_t agg_index, uint16_t len)
1123 struct qede_agg_info *tpa_info;
1124 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1127 /* Under certain conditions the FW may not consume an additional or new
1128 * BD, so the decision to consume the BD must be made based on
1129 * len_list[0].
1131 if (rte_le_to_cpu_16(len)) {
1132 tpa_info = &rxq->tpa_info[agg_index];
1133 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1134 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
1136 curr_frag->nb_segs = 1;
1137 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1138 curr_frag->data_len = curr_frag->pkt_len;
1139 tpa_info->tpa_tail->next = curr_frag;
1140 tpa_info->tpa_tail = curr_frag;
1141 qede_rx_bd_ring_consume(rxq);
1142 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1143 PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
1144 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1145 rxq->rx_alloc_errors++;
1151 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1152 struct qede_rx_queue *rxq,
1153 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1155 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1156 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1157 /* only len_list[0] will have value */
1158 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1163 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1164 struct qede_rx_queue *rxq,
1165 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1167 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1169 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1171 /* Update total length and frags based on end TPA */
1172 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1173 /* TODO: Add Sanity Checks */
1174 rx_mb->nb_segs = cqe->num_of_bds;
1175 rx_mb->pkt_len = cqe->total_packet_len;
1177 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1178 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1179 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1183 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1188 static const uint32_t
1189 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1190 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1191 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1192 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1193 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1194 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1195 RTE_PTYPE_TUNNEL_GENEVE,
1196 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1197 RTE_PTYPE_TUNNEL_GRE,
1198 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1199 RTE_PTYPE_TUNNEL_VXLAN,
1200 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1201 RTE_PTYPE_TUNNEL_GENEVE,
1202 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1203 RTE_PTYPE_TUNNEL_GRE,
1204 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1205 RTE_PTYPE_TUNNEL_VXLAN,
1206 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1207 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1208 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1209 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1210 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1211 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1212 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1213 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1214 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1215 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1216 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1217 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1218 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1219 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1220 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1221 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1222 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1223 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1224 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1225 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1226 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1227 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1228 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1229 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1232 /* Cover bits[4-0] to include tunn_type and next protocol */
1233 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1234 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1235 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1236 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1238 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1239 return ptype_tunn_lkup_tbl[val];
1241 return RTE_PTYPE_UNKNOWN;
1245 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1246 uint8_t num_segs, uint16_t pkt_len)
1248 struct qede_rx_queue *rxq = p_rxq;
1249 struct qede_dev *qdev = rxq->qdev;
1250 register struct rte_mbuf *seg1 = NULL;
1251 register struct rte_mbuf *seg2 = NULL;
1252 uint16_t sw_rx_index;
1257 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1259 if (unlikely(!cur_size)) {
1260 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1261 " left for mapping jumbo\n", num_segs);
1262 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1265 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1266 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1267 qede_rx_bd_ring_consume(rxq);
1268 pkt_len -= cur_size;
1269 seg2->data_len = cur_size;
1279 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1281 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1284 PMD_RX_LOG(INFO, rxq,
1285 "len 0x%04x bf 0x%04x hash_val 0x%x"
1286 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1287 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1288 m->data_len, bitfield, m->hash.rss,
1289 (unsigned long)m->ol_flags,
1290 rte_get_ptype_l2_name(m->packet_type),
1291 rte_get_ptype_l3_name(m->packet_type),
1292 rte_get_ptype_l4_name(m->packet_type),
1293 rte_get_ptype_tunnel_name(m->packet_type),
1294 rte_get_ptype_inner_l2_name(m->packet_type),
1295 rte_get_ptype_inner_l3_name(m->packet_type),
1296 rte_get_ptype_inner_l4_name(m->packet_type));
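/* Rx burst handler: consume completion-ring CQEs up to the hardware consumer
 * index, turning regular CQEs into single or scattered mbufs and TPA
 * start/cont/end CQEs into chained LRO mbufs, then republish the Rx producers.
 */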
1301 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1303 struct qede_rx_queue *rxq = p_rxq;
1304 struct qede_dev *qdev = rxq->qdev;
1305 struct ecore_dev *edev = &qdev->edev;
1306 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1307 uint16_t rx_pkt = 0;
1308 union eth_rx_cqe *cqe;
1309 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1310 register struct rte_mbuf *rx_mb = NULL;
1311 register struct rte_mbuf *seg1 = NULL;
1312 enum eth_rx_cqe_type cqe_type;
1313 uint16_t pkt_len = 0; /* Sum of all BD segments */
1314 uint16_t len; /* Length of first BD */
1315 uint8_t num_segs = 1;
1316 uint16_t preload_idx;
1317 uint16_t parse_flag;
1318 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1319 uint8_t bitfield_val;
1321 uint8_t tunn_parse_flag;
1323 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1325 uint32_t packet_type;
1328 uint8_t offset, tpa_agg_idx, flags;
1329 struct qede_agg_info *tpa_info = NULL;
1332 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1333 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1337 if (hw_comp_cons == sw_comp_cons)
1340 while (sw_comp_cons != hw_comp_cons) {
1342 packet_type = RTE_PTYPE_UNKNOWN;
1344 tpa_start_flg = false;
1347 /* Get the CQE from the completion ring */
1349 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1350 cqe_type = cqe->fast_path_regular.type;
1351 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1354 case ETH_RX_CQE_TYPE_REGULAR:
1355 fp_cqe = &cqe->fast_path_regular;
1357 case ETH_RX_CQE_TYPE_TPA_START:
1358 cqe_start_tpa = &cqe->fast_path_tpa_start;
1359 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1360 tpa_start_flg = true;
1361 /* Mark it as LRO packet */
1362 ol_flags |= PKT_RX_LRO;
1363 /* In split mode, seg_len is the same as len_on_first_bd
1364 * and ext_bd_len_list will be empty since there are
1365 * no additional buffers
1367 PMD_RX_LOG(INFO, rxq,
1368 "TPA start[%d] - len_on_first_bd %d header %d"
1369 " [bd_list[0] %d], [seg_len %d]\n",
1370 cqe_start_tpa->tpa_agg_index,
1371 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1372 cqe_start_tpa->header_len,
1373 rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1374 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1377 case ETH_RX_CQE_TYPE_TPA_CONT:
1378 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1379 &cqe->fast_path_tpa_cont);
1381 case ETH_RX_CQE_TYPE_TPA_END:
1382 qede_rx_process_tpa_end_cqe(qdev, rxq,
1383 &cqe->fast_path_tpa_end);
1384 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1385 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1386 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1388 case ETH_RX_CQE_TYPE_SLOW_PATH:
1389 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1390 ecore_eth_cqe_completion(
1391 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1392 (struct eth_slow_path_rx_cqe *)cqe);
1398 /* Get the data from the SW ring */
1399 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1400 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1401 assert(rx_mb != NULL);
1403 /* Handle regular CQE or TPA start CQE */
1404 if (!tpa_start_flg) {
1405 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1406 offset = fp_cqe->placement_offset;
1407 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1408 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1409 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1410 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1411 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1412 bitfield_val = fp_cqe->bitfields;
1416 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1417 offset = cqe_start_tpa->placement_offset;
1418 /* seg_len = len_on_first_bd */
1419 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1420 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1421 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1422 bitfield_val = cqe_start_tpa->bitfields;
1424 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1426 if (qede_tunn_exist(parse_flag)) {
1427 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1428 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1429 PMD_RX_LOG(ERR, rxq,
1430 "L4 csum failed, flags = 0x%x\n",
1432 rxq->rx_hw_errors++;
1433 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1435 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1438 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1439 PMD_RX_LOG(ERR, rxq,
1440 "Outer L3 csum failed, flags = 0x%x\n",
1442 rxq->rx_hw_errors++;
1443 ol_flags |= PKT_RX_EIP_CKSUM_BAD;
1445 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1449 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1451 flags = fp_cqe->tunnel_pars_flags.flags;
1452 tunn_parse_flag = flags;
1456 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1460 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1462 /* Outer L3/L4 types are not available in the CQE */
1463 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1465 /* Outer L3/L4 types are not available in the CQE.
1466 * Need to add the offset to parse correctly,
1468 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1469 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1471 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1474 /* Common handling for non-tunnel packets and for inner
1475 * headers in the case of tunnel.
1477 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1478 PMD_RX_LOG(ERR, rxq,
1479 "L4 csum failed, flags = 0x%x\n",
1481 rxq->rx_hw_errors++;
1482 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1484 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1486 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1487 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1489 rxq->rx_hw_errors++;
1490 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1492 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1495 if (CQE_HAS_VLAN(parse_flag) ||
1496 CQE_HAS_OUTER_VLAN(parse_flag)) {
1497 /* Note: FW doesn't indicate Q-in-Q packet */
1498 ol_flags |= PKT_RX_VLAN;
1499 if (qdev->vlan_strip_flg) {
1500 ol_flags |= PKT_RX_VLAN_STRIPPED;
1501 rx_mb->vlan_tci = vlan_tci;
1506 if (qdev->rss_enable) {
1507 ol_flags |= PKT_RX_RSS_HASH;
1508 rx_mb->hash.rss = rss_hash;
1511 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1512 PMD_RX_LOG(ERR, rxq,
1513 "New buffer allocation failed,"
1514 "dropping incoming packet\n");
1515 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
1516 rte_eth_devices[rxq->port_id].
1517 data->rx_mbuf_alloc_failed++;
1518 rxq->rx_alloc_errors++;
1521 qede_rx_bd_ring_consume(rxq);
1523 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1524 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1525 " len on first: %04x Total Len: %04x",
1526 fp_cqe->bd_num, len, pkt_len);
1527 num_segs = fp_cqe->bd_num - 1;
1529 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1532 for (j = 0; j < num_segs; j++) {
1533 if (qede_alloc_rx_buffer(rxq)) {
1534 PMD_RX_LOG(ERR, rxq,
1535 "Buffer allocation failed");
1536 rte_eth_devices[rxq->port_id].
1537 data->rx_mbuf_alloc_failed++;
1538 rxq->rx_alloc_errors++;
1544 rxq->rx_segs++; /* for the first segment */
1546 /* Prefetch next mbuf while processing current one. */
1547 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1548 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1550 /* Update rest of the MBUF fields */
1551 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1552 rx_mb->port = rxq->port_id;
1553 rx_mb->ol_flags = ol_flags;
1554 rx_mb->data_len = len;
1555 rx_mb->packet_type = packet_type;
1556 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1557 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1559 if (!tpa_start_flg) {
1560 rx_mb->nb_segs = fp_cqe->bd_num;
1561 rx_mb->pkt_len = pkt_len;
1563 /* store ref to the updated mbuf */
1564 tpa_info->tpa_head = rx_mb;
1565 tpa_info->tpa_tail = tpa_info->tpa_head;
1567 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1569 if (!tpa_start_flg) {
1570 rx_pkts[rx_pkt] = rx_mb;
1574 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1575 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1576 if (rx_pkt == nb_pkts) {
1577 PMD_RX_LOG(DEBUG, rxq,
1578 "Budget reached nb_pkts=%u received=%u",
1584 qede_update_rx_prod(qdev, rxq);
1586 rxq->rcv_pkts += rx_pkt;
1588 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1594 /* Populate scatter gather buffer descriptor fields */
1595 static inline uint16_t
1596 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1597 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
1600 struct qede_tx_queue *txq = p_txq;
1601 struct eth_tx_bd *tx_bd = NULL;
1603 uint16_t nb_segs = 0;
1605 /* Check for scattered buffers */
1607 if (start_seg == 0) {
1609 *bd2 = (struct eth_tx_2nd_bd *)
1610 ecore_chain_produce(&txq->tx_pbl);
1611 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1614 mapping = rte_mbuf_data_iova(m_seg);
1615 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1616 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1617 } else if (start_seg == 1) {
1619 *bd3 = (struct eth_tx_3rd_bd *)
1620 ecore_chain_produce(&txq->tx_pbl);
1621 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1624 mapping = rte_mbuf_data_iova(m_seg);
1625 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1626 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1628 tx_bd = (struct eth_tx_bd *)
1629 ecore_chain_produce(&txq->tx_pbl);
1630 memset(tx_bd, 0, sizeof(*tx_bd));
1632 mapping = rte_mbuf_data_iova(m_seg);
1633 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1634 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1637 m_seg = m_seg->next;
1640 /* Return total scattered buffers */
1644 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1646 print_tx_bd_info(struct qede_tx_queue *txq,
1647 struct eth_tx_1st_bd *bd1,
1648 struct eth_tx_2nd_bd *bd2,
1649 struct eth_tx_3rd_bd *bd3,
1650 uint64_t tx_ol_flags)
1652 char ol_buf[256] = { 0 }; /* for verbose prints */
1655 PMD_TX_LOG(INFO, txq,
1656 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
1657 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1658 bd1->data.bd_flags.bitfields,
1659 rte_cpu_to_le_16(bd1->data.bitfields));
1661 PMD_TX_LOG(INFO, txq,
1662 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
1663 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
1664 bd2->data.bitfields2, bd2->data.tunn_ip_size);
1666 PMD_TX_LOG(INFO, txq,
1667 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
1668 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
1669 rte_cpu_to_le_16(bd3->nbytes),
1670 rte_cpu_to_le_16(bd3->data.bitfields),
1671 rte_cpu_to_le_16(bd3->data.lso_mss),
1672 bd3->data.tunn_l4_hdr_start_offset_w,
1673 bd3->data.tunn_hdr_size_w);
1675 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1676 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1680 /* TX prepare to check that packets meet TX conditions */
1682 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1683 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1686 struct qede_tx_queue *txq = p_txq;
1688 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1695 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1699 for (i = 0; i < nb_pkts; i++) {
1701 ol_flags = m->ol_flags;
1702 if (ol_flags & PKT_TX_TCP_SEG) {
1703 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1704 rte_errno = EINVAL;
1707 /* TBD: confirm it's ~9700B for both? */
1708 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1709 rte_errno = EINVAL;
1713 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1714 rte_errno = EINVAL;
1718 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1719 rte_errno = ENOTSUP;
1723 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1724 ret = rte_validate_tx_offload(m);
1732 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1733 if (unlikely(i != nb_pkts))
1734 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
1740 #define MPLSINUDP_HDR_SIZE (12)
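/* Presumably 8 bytes of UDP header plus one 4-byte MPLS label; offsets handed
 * to the firmware below are expressed in 2-byte words, hence the divisions
 * by 2.
 */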
1742 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1744 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
1745 struct qede_tx_queue *txq)
1747 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
1748 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
1749 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
1750 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
1751 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
1752 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
1753 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
1754 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
1755 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
1756 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
1757 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
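/* Tx burst handler: for each mbuf, derive the BD flags from ol_flags, build a
 * 1st BD (plus 2nd/3rd BDs for LSO, tunnel or multi-segment frames), map every
 * segment for DMA, and ring the doorbell once with the final producer index.
 */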
1762 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1764 struct qede_tx_queue *txq = p_txq;
1765 struct qede_dev *qdev = txq->qdev;
1766 struct ecore_dev *edev = &qdev->edev;
1767 struct rte_mbuf *mbuf;
1768 struct rte_mbuf *m_seg = NULL;
1769 uint16_t nb_tx_pkts;
1773 uint16_t nb_pkt_sent = 0;
1777 __rte_unused bool tunn_flg;
1778 bool tunn_ipv6_ext_flg;
1779 struct eth_tx_1st_bd *bd1;
1780 struct eth_tx_2nd_bd *bd2;
1781 struct eth_tx_3rd_bd *bd3;
1782 uint64_t tx_ol_flags;
1786 uint8_t bd1_bd_flags_bf;
1795 uint8_t tunn_l4_hdr_start_offset;
1796 uint8_t tunn_hdr_size;
1797 uint8_t inner_l2_hdr_size;
1798 uint16_t inner_l4_hdr_offset;
1800 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1801 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
1802 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1803 qede_process_tx_compl(edev, txq);
1806 nb_tx_pkts = nb_pkts;
1807 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1808 while (nb_tx_pkts--) {
1809 /* Init flags/values */
1819 bd1_bd_flags_bf = 0;
1824 mplsoudp_flg = false;
1825 tunn_ipv6_ext_flg = false;
1827 tunn_l4_hdr_start_offset = 0;
1832 /* Check minimum TX BDS availability against available BDs */
1833 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
1836 tx_ol_flags = mbuf->ol_flags;
1837 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1839 /* TX prepare would have already checked supported tunnel Tx
1840 * offloads. Don't rely on pkt_type marked by Rx, instead use
1841 * tx_ol_flags to decide.
1843 tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
1846 /* Check against max which is Tunnel IPv6 + ext */
1847 if (unlikely(txq->nb_tx_avail <
1848 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
1851 /* First indicate its a tunnel pkt */
1852 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
1853 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1854 /* Legacy FW had flipped behavior in regard to this bit
1855 * i.e. it needed to set to prevent FW from touching
1856 * encapsulated packets when it didn't need to.
1858 if (unlikely(txq->is_legacy)) {
1860 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1863 /* Outer IP checksum offload */
1864 if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
1865 PKT_TX_OUTER_IPV4)) {
1867 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
1868 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1872 * Currently, only inner checksum offload in MPLS-in-UDP
1873 * tunnel with one MPLS label is supported. Both outer
1874 * and inner layers lengths need to be provided in
1877 if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1878 PKT_TX_TUNNEL_MPLSINUDP) {
1879 mplsoudp_flg = true;
1880 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1881 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1883 /* Outer L4 offset in two byte words */
1884 tunn_l4_hdr_start_offset =
1885 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
1886 /* Tunnel header size in two byte words */
1887 tunn_hdr_size = (mbuf->outer_l2_len +
1888 mbuf->outer_l3_len +
1889 MPLSINUDP_HDR_SIZE) / 2;
1890 /* Inner L2 header size in two byte words */
1891 inner_l2_hdr_size = (mbuf->l2_len -
1892 MPLSINUDP_HDR_SIZE) / 2;
1893 /* Inner L4 header offset from the beginning
1894 * of inner packet in two byte words
1896 inner_l4_hdr_offset = (mbuf->l2_len -
1897 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
1899 /* Inner L2 size and address type */
1900 bd2_bf1 |= (inner_l2_hdr_size &
1901 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
1902 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
1903 bd2_bf1 |= (UNICAST_ADDRESS &
1904 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
1905 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
1906 /* Treated as IPv6+Ext */
1908 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
1910 /* Mark inner IPv6 if present */
1911 if (tx_ol_flags & PKT_TX_IPV6)
1913 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
1915 /* Inner L4 offsets */
1916 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
1917 (tx_ol_flags & (PKT_TX_UDP_CKSUM |
1918 PKT_TX_TCP_CKSUM))) {
1919 /* Determines if BD3 is needed */
1920 tunn_ipv6_ext_flg = true;
1921 if ((tx_ol_flags & PKT_TX_L4_MASK) ==
1924 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
1927 /* TODO other pseudo checksum modes are
1931 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
1932 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
1933 bd2_bf2 |= (inner_l4_hdr_offset &
1934 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
1935 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
1937 } /* End MPLSoUDP */
1938 } /* End Tunnel handling */
1940 if (tx_ol_flags & PKT_TX_TCP_SEG) {
1942 if (unlikely(txq->nb_tx_avail <
1943 ETH_TX_MIN_BDS_PER_LSO_PKT))
1945 /* For LSO, packet header and payload must reside on
1946 * buffers pointed by different BDs. Using BD1 for HDR
1947 * and BD2 onwards for data.
1949 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
1951 hdr_size += mbuf->outer_l2_len +
1954 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
1956 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1957 /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
1959 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1960 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
1961 /* Using one header BD */
1962 bd3_bf |= rte_cpu_to_le_16(1 <<
1963 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1965 if (unlikely(txq->nb_tx_avail <
1966 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
1969 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
1970 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1973 /* Descriptor based VLAN insertion */
1974 if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1975 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
1977 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1980 /* Offload the IP checksum in the hardware */
1981 if (tx_ol_flags & PKT_TX_IP_CKSUM) {
1983 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1984 /* There's no DPDK flag to request outer-L4 csum
1985 * offload. But in the case of tunnel if inner L3 or L4
1986 * csum offload is requested then we need to force
1987 * recalculation of L4 tunnel header csum also.
1989 if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
1990 PKT_TX_TUNNEL_GRE)) {
1992 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
1993 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1997 /* L4 checksum offload (tcp or udp) */
1998 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
1999 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
2001 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2002 /* There's no DPDK flag to request outer-L4 csum
2003 * offload. But in the case of tunnel if inner L3 or L4
2004 * csum offload is requested then we need to force
2005 * recalculation of L4 tunnel header csum also.
2009 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2010 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2014 /* Fill the entry in the SW ring and the BDs in the FW ring */
2016 txq->sw_tx_ring[idx].mbuf = mbuf;
2019 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2020 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2023 /* Map MBUF linear data for DMA and set in the BD1 */
2024 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2026 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2027 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2028 bd1->data.vlan = vlan;
2030 if (lso_flg || mplsoudp_flg) {
2031 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2033 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2037 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2040 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2041 rte_mbuf_data_iova(mbuf)),
2042 mbuf->data_len - hdr_size);
2043 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2045 bd2->data.bitfields2 =
2046 rte_cpu_to_le_16(bd2_bf2);
2048 bd2->data.tunn_ip_size =
2049 rte_cpu_to_le_16(mbuf->outer_l3_len);
2052 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2053 bd3 = (struct eth_tx_3rd_bd *)
2054 ecore_chain_produce(&txq->tx_pbl);
2055 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2057 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2059 bd3->data.lso_mss = mss;
2061 bd3->data.tunn_l4_hdr_start_offset_w =
2062 tunn_l4_hdr_start_offset;
2063 bd3->data.tunn_hdr_size_w =
2069 /* Handle fragmented MBUF */
2072 /* Encode scatter gather buffer descriptors if required */
2073 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2074 bd1->data.nbds = nbds + nb_frags;
2076 txq->nb_tx_avail -= bd1->data.nbds;
2078 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
2080 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2081 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2082 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2088 /* Write value of prod idx into bd_prod */
2089 txq->tx_db.data.bd_prod = bd_prod;
2091 rte_compiler_barrier();
2092 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
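/* One doorbell per burst: bd_prod was captured after the last packet was
 * queued, so the device picks up the whole batch with a single write.
 */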
2095 /* Check again for Tx completions */
2096 qede_process_tx_compl(edev, txq);
2098 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2099 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2105 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
2106 __rte_unused struct rte_mbuf **pkts,
2107 __rte_unused uint16_t nb_pkts)