1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
10 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
12 struct rte_mbuf *new_mb = NULL;
13 struct eth_rx_bd *rx_bd;
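/* Note: NUM_RX_BDS() is assumed to be the ring-size-minus-one mask, so the
 * bitwise AND below is a cheap modulo; this is only valid because queue
 * setup rejects ring sizes that are not powers of 2.
 */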
15 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
17 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
18 if (unlikely(!new_mb)) {
20 "Failed to allocate rx buffer "
21 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
22 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
23 rte_mempool_avail_count(rxq->mb_pool),
24 rte_mempool_in_use_count(rxq->mb_pool));
27 rxq->sw_rx_ring[idx].mbuf = new_mb;
28 rxq->sw_rx_ring[idx].page_offset = 0;
29 mapping = rte_mbuf_data_iova_default(new_mb);
30 /* Advance PROD and get BD pointer */
31 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
32 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
33 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
38 /* Criteria for calculating Rx buffer size -
39 * 1) rx_buf_size should not exceed the size of mbuf
40 * 2) In scattered_rx mode - minimum rx_buf_size should be
41 * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
42 * 3) In regular mode - minimum rx_buf_size should be
43 * (MTU + Maximum L2 Header Size + 2)
44 * In the above cases, +2 corresponds to the 2 bytes of padding in front of the L2
46 * 4) rx_buf_size should be cache-line aligned. So, considering
47 * criteria 1, round the size down (floor) rather than up (ceil),
48 * so that rx_buf_size never exceeds the mbuf size.
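/* Worked example (illustrative numbers only): with MTU 1500, an assumed
 * 18-byte maximum L2 header and the 2-byte pad, regular mode needs at
 * least 1520 bytes per buffer; flooring that to the cache-line size keeps
 * rx_buf_size within the mbuf data room.
 */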
51 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
52 uint16_t max_frame_size)
54 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
55 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
58 if (dev->data->scattered_rx) {
59 /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers
60 * can be used for a single packet, so make sure the
61 * mbuf size is sufficient for this.
63 if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
64 (max_frame_size + QEDE_ETH_OVERHEAD)) {
65 DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
66 mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
70 rx_buf_size = RTE_MAX(mbufsz,
71 (max_frame_size + QEDE_ETH_OVERHEAD) /
72 ETH_RX_MAX_BUFF_PER_PKT);
74 rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
77 /* Align to cache-line size if needed */
78 return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
82 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
83 uint16_t nb_desc, unsigned int socket_id,
84 __rte_unused const struct rte_eth_rxconf *rx_conf,
85 struct rte_mempool *mp)
87 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
88 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
89 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
90 struct qede_rx_queue *rxq;
91 uint16_t max_rx_pkt_len;
96 PMD_INIT_FUNC_TRACE(edev);
98 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
99 if (!rte_is_power_of_2(nb_desc)) {
100 DP_ERR(edev, "Ring size %u is not a power of 2\n",
105 /* Free memory prior to re-allocation if needed... */
106 if (dev->data->rx_queues[queue_idx] != NULL) {
107 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
108 dev->data->rx_queues[queue_idx] = NULL;
111 /* First allocate the rx queue data structure */
112 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
113 RTE_CACHE_LINE_SIZE, socket_id);
116 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
123 rxq->nb_rx_desc = nb_desc;
124 rxq->queue_id = queue_idx;
125 rxq->port_id = dev->data->port_id;
127 max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
129 /* Fix up RX buffer size */
130 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
131 /* Cache-align the mbuf size to simplify the rx_buf_size calculation */
132 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
133 if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
134 (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
135 if (!dev->data->scattered_rx) {
136 DP_INFO(edev, "Forcing scatter-gather mode\n");
137 dev->data->scattered_rx = 1;
141 rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
147 rxq->rx_buf_size = rc;
149 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
150 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
152 /* Allocate the parallel driver ring for Rx buffers */
153 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
154 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
155 RTE_CACHE_LINE_SIZE, socket_id);
156 if (!rxq->sw_rx_ring) {
157 DP_ERR(edev, "Memory allocation failed for sw_rx_ring on"
158 " socket %u\n", socket_id);
163 /* Allocate FW Rx ring */
164 rc = qdev->ops->common->chain_alloc(edev,
165 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
166 ECORE_CHAIN_MODE_NEXT_PTR,
167 ECORE_CHAIN_CNT_TYPE_U16,
169 sizeof(struct eth_rx_bd),
173 if (rc != ECORE_SUCCESS) {
174 DP_ERR(edev, "Memory allocation failed for RX BD ring"
175 " on socket %u\n", socket_id);
176 rte_free(rxq->sw_rx_ring);
181 /* Allocate FW completion ring */
182 rc = qdev->ops->common->chain_alloc(edev,
183 ECORE_CHAIN_USE_TO_CONSUME,
184 ECORE_CHAIN_MODE_PBL,
185 ECORE_CHAIN_CNT_TYPE_U16,
187 sizeof(union eth_rx_cqe),
191 if (rc != ECORE_SUCCESS) {
192 DP_ERR(edev, "Memory allocation failed for RX CQE ring"
193 " on socket %u\n", socket_id);
194 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
195 rte_free(rxq->sw_rx_ring);
200 dev->data->rx_queues[queue_idx] = rxq;
201 qdev->fp_array[queue_idx].rxq = rxq;
203 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
204 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
210 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
211 struct qede_rx_queue *rxq)
213 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
214 ecore_chain_reset(&rxq->rx_bd_ring);
215 ecore_chain_reset(&rxq->rx_comp_ring);
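	/* hw_cons_ptr points into the status block (see queue start); clearing
	 * it keeps the next poll from acting on stale completion indices after
	 * the rings are reset.
	 */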
218 *rxq->hw_cons_ptr = 0;
221 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
225 if (rxq->sw_rx_ring) {
226 for (i = 0; i < rxq->nb_rx_desc; i++) {
227 if (rxq->sw_rx_ring[i].mbuf) {
228 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
229 rxq->sw_rx_ring[i].mbuf = NULL;
235 void qede_rx_queue_release(void *rx_queue)
237 struct qede_rx_queue *rxq = rx_queue;
238 struct qede_dev *qdev = rxq->qdev;
239 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
241 PMD_INIT_FUNC_TRACE(edev);
244 qede_rx_queue_release_mbufs(rxq);
245 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
246 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
247 rte_free(rxq->sw_rx_ring);
252 /* Stops a given RX queue in the HW */
253 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
255 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
256 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
257 struct ecore_hwfn *p_hwfn;
258 struct qede_rx_queue *rxq;
262 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
263 rxq = eth_dev->data->rx_queues[rx_queue_id];
264 hwfn_index = rx_queue_id % edev->num_hwfns;
265 p_hwfn = &edev->hwfns[hwfn_index];
266 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
268 if (rc != ECORE_SUCCESS) {
269 DP_ERR(edev, "RX queue %u stop failed\n", rx_queue_id);
272 qede_rx_queue_release_mbufs(rxq);
273 qede_rx_queue_reset(qdev, rxq);
274 eth_dev->data->rx_queue_state[rx_queue_id] =
275 RTE_ETH_QUEUE_STATE_STOPPED;
276 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
278 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
286 qede_tx_queue_setup(struct rte_eth_dev *dev,
289 unsigned int socket_id,
290 const struct rte_eth_txconf *tx_conf)
292 struct qede_dev *qdev = dev->data->dev_private;
293 struct ecore_dev *edev = &qdev->edev;
294 struct qede_tx_queue *txq;
297 PMD_INIT_FUNC_TRACE(edev);
299 if (!rte_is_power_of_2(nb_desc)) {
300 DP_ERR(edev, "Ring size %u is not a power of 2\n",
305 /* Free memory prior to re-allocation if needed... */
306 if (dev->data->tx_queues[queue_idx] != NULL) {
307 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
308 dev->data->tx_queues[queue_idx] = NULL;
311 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
312 RTE_CACHE_LINE_SIZE, socket_id);
316 "Unable to allocate memory for txq on socket %u",
321 txq->nb_tx_desc = nb_desc;
323 txq->port_id = dev->data->port_id;
325 rc = qdev->ops->common->chain_alloc(edev,
326 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
327 ECORE_CHAIN_MODE_PBL,
328 ECORE_CHAIN_CNT_TYPE_U16,
330 sizeof(union eth_tx_bd_types),
333 if (rc != ECORE_SUCCESS) {
335 "Unable to allocate memory for txbd ring on socket %u",
337 qede_tx_queue_release(txq);
341 /* Allocate software ring */
342 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
343 (sizeof(struct qede_tx_entry) *
345 RTE_CACHE_LINE_SIZE, socket_id);
347 if (!txq->sw_tx_ring) {
349 "Unable to allocate memory for txbd ring on socket %u",
351 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
352 qede_tx_queue_release(txq);
356 txq->queue_id = queue_idx;
358 txq->nb_tx_avail = txq->nb_tx_desc;
360 txq->tx_free_thresh =
361 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
362 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
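	/* Worked example (assuming QEDE_DEFAULT_TX_FREE_THRESH is 32): with
	 * nb_desc = 512 and no user-supplied tx_free_thresh, the threshold is
	 * 480, i.e. the xmit path reaps completions once fewer than 480 BDs
	 * remain available.
	 */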
364 dev->data->tx_queues[queue_idx] = txq;
365 qdev->fp_array[queue_idx].txq = txq;
368 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
369 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
375 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
376 struct qede_tx_queue *txq)
378 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
379 ecore_chain_reset(&txq->tx_pbl);
382 *txq->hw_cons_ptr = 0;
385 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
389 if (txq->sw_tx_ring) {
390 for (i = 0; i < txq->nb_tx_desc; i++) {
391 if (txq->sw_tx_ring[i].mbuf) {
392 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
393 txq->sw_tx_ring[i].mbuf = NULL;
399 void qede_tx_queue_release(void *tx_queue)
401 struct qede_tx_queue *txq = tx_queue;
402 struct qede_dev *qdev = txq->qdev;
403 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
405 PMD_INIT_FUNC_TRACE(edev);
408 qede_tx_queue_release_mbufs(txq);
409 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
410 rte_free(txq->sw_tx_ring);
415 /* This function allocates fast-path status block memory */
417 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
420 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
421 struct status_block_e4 *sb_virt;
425 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
426 sizeof(struct status_block_e4));
428 DP_ERR(edev, "Status block allocation failed\n");
431 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
434 DP_ERR(edev, "Status block initialization failed\n");
435 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
436 sizeof(struct status_block_e4));
443 int qede_alloc_fp_resc(struct qede_dev *qdev)
445 struct ecore_dev *edev = &qdev->edev;
446 struct qede_fastpath *fp;
451 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
453 num_sbs = ecore_cxt_get_proto_cid_count
454 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
457 DP_ERR(edev, "No status blocks available\n");
461 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
462 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
464 if (!qdev->fp_array) {
465 DP_ERR(edev, "fp array allocation failed\n");
469 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
470 sizeof(*qdev->fp_array));
472 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
473 fp = &qdev->fp_array[sb_idx];
476 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
477 RTE_CACHE_LINE_SIZE);
479 DP_ERR(edev, "FP sb_info allocation failed\n");
482 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
483 DP_ERR(edev, "FP status block allocation failed\n");
486 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
487 fp->sb_info->igu_sb_id);
493 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
495 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
496 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
497 struct qede_fastpath *fp;
501 PMD_INIT_FUNC_TRACE(edev);
503 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
504 fp = &qdev->fp_array[sb_idx];
507 DP_INFO(edev, "Free sb_info index 0x%x\n",
508 fp->sb_info->igu_sb_id);
510 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
511 fp->sb_info->sb_phys,
512 sizeof(struct status_block_e4));
513 rte_free(fp->sb_info);
518 /* Free packet buffers and ring memories */
519 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
520 if (eth_dev->data->rx_queues[i]) {
521 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
522 eth_dev->data->rx_queues[i] = NULL;
526 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
527 if (eth_dev->data->tx_queues[i]) {
528 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
529 eth_dev->data->tx_queues[i] = NULL;
534 rte_free(qdev->fp_array);
535 qdev->fp_array = NULL;
539 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
540 struct qede_rx_queue *rxq)
542 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
543 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
544 struct eth_rx_prod_data rx_prods = { 0 };
546 /* Update producers */
547 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
548 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
550 /* Make sure that the BD and SGE data is updated before updating the
551 * producers since FW might read the BD/SGE right after the producer
556 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
557 (uint32_t *)&rx_prods);
559 /* mmiowb is needed to synchronize doorbell writes from more than one
560 * processor. It guarantees that the write arrives at the device before
561 * the napi lock is released and another qede_poll is called (possibly
562 * on another CPU). Without this barrier, the next doorbell can bypass
563 * this doorbell. This is applicable to IA64/Altix systems.
567 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
570 /* Starts a given RX queue in HW */
572 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
574 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
575 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
576 struct ecore_queue_start_common_params params;
577 struct ecore_rxq_start_ret_params ret_params;
578 struct qede_rx_queue *rxq;
579 struct qede_fastpath *fp;
580 struct ecore_hwfn *p_hwfn;
581 dma_addr_t p_phys_table;
587 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
588 fp = &qdev->fp_array[rx_queue_id];
589 rxq = eth_dev->data->rx_queues[rx_queue_id];
590 /* Allocate buffers for the Rx ring */
591 for (j = 0; j < rxq->nb_rx_desc; j++) {
592 rc = qede_alloc_rx_buffer(rxq);
594 DP_ERR(edev, "RX buffer allocation failed"
595 " for rxq = %u\n", rx_queue_id);
599 /* disable interrupts */
600 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
602 memset(¶ms, 0, sizeof(params));
603 params.queue_id = rx_queue_id / edev->num_hwfns;
605 params.stats_id = params.vport_id;
606 params.p_sb = fp->sb_info;
607 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
608 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
609 params.sb_idx = RX_PI;
610 hwfn_index = rx_queue_id % edev->num_hwfns;
611 p_hwfn = &edev->hwfns[hwfn_index];
612 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
613 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
614 memset(&ret_params, 0, sizeof(ret_params));
615 rc = ecore_eth_rx_queue_start(p_hwfn,
616 p_hwfn->hw_info.opaque_fid,
617 ¶ms, fp->rxq->rx_buf_size,
618 fp->rxq->rx_bd_ring.p_phys_addr,
619 p_phys_table, page_cnt,
622 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
626 /* Update with the returned parameters */
627 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
628 fp->rxq->handle = ret_params.p_handle;
630 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
631 qede_update_rx_prod(qdev, fp->rxq);
632 eth_dev->data->rx_queue_state[rx_queue_id] =
633 RTE_ETH_QUEUE_STATE_STARTED;
634 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
636 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
644 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
646 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
647 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
648 struct ecore_queue_start_common_params params;
649 struct ecore_txq_start_ret_params ret_params;
650 struct ecore_hwfn *p_hwfn;
651 dma_addr_t p_phys_table;
652 struct qede_tx_queue *txq;
653 struct qede_fastpath *fp;
658 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
659 txq = eth_dev->data->tx_queues[tx_queue_id];
660 fp = &qdev->fp_array[tx_queue_id];
661 memset(¶ms, 0, sizeof(params));
662 params.queue_id = tx_queue_id / edev->num_hwfns;
664 params.stats_id = params.vport_id;
665 params.p_sb = fp->sb_info;
666 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
667 fp->txq->queue_id, fp->sb_info->igu_sb_id);
668 params.sb_idx = TX_PI(0); /* tc = 0 */
669 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
670 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
671 hwfn_index = tx_queue_id % edev->num_hwfns;
672 p_hwfn = &edev->hwfns[hwfn_index];
673 if (qdev->dev_info.is_legacy)
674 fp->txq->is_legacy = true;
675 rc = ecore_eth_tx_queue_start(p_hwfn,
676 p_hwfn->hw_info.opaque_fid,
678 p_phys_table, page_cnt,
680 if (rc != ECORE_SUCCESS) {
681 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
685 txq->doorbell_addr = ret_params.p_doorbell;
686 txq->handle = ret_params.p_handle;
688 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
689 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
691 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
693 SET_FIELD(txq->tx_db.data.params,
694 ETH_DB_DATA_AGG_VAL_SEL,
695 DQ_XCM_ETH_TX_BD_PROD_CMD);
696 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
697 eth_dev->data->tx_queue_state[tx_queue_id] =
698 RTE_ETH_QUEUE_STATE_STARTED;
699 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
701 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
709 qede_free_tx_pkt(struct qede_tx_queue *txq)
711 struct rte_mbuf *mbuf;
716 mbuf = txq->sw_tx_ring[idx].mbuf;
718 nb_segs = mbuf->nb_segs;
719 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
721 /* It's like consuming rxbuf in recv() */
722 ecore_chain_consume(&txq->tx_pbl);
726 rte_pktmbuf_free(mbuf);
727 txq->sw_tx_ring[idx].mbuf = NULL;
729 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
731 ecore_chain_consume(&txq->tx_pbl);
737 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
738 struct qede_tx_queue *txq)
741 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
745 rte_compiler_barrier();
746 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
747 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
748 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
749 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
750 abs(hw_bd_cons - sw_tx_cons));
752 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
753 qede_free_tx_pkt(txq);
756 static int qede_drain_txq(struct qede_dev *qdev,
757 struct qede_tx_queue *txq, bool allow_drain)
759 struct ecore_dev *edev = &qdev->edev;
762 while (txq->sw_tx_cons != txq->sw_tx_prod) {
763 qede_process_tx_compl(edev, txq);
766 DP_ERR(edev, "Tx queue[%u] is stuck, "
767 "requesting MCP to drain\n",
769 rc = qdev->ops->common->drain(edev);
772 return qede_drain_txq(qdev, txq, false);
774 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
775 "PROD=%d, CONS=%d\n",
776 txq->queue_id, txq->sw_tx_prod,
782 rte_compiler_barrier();
785 /* FW finished processing, wait for HW to transmit all tx packets */
791 /* Stops a given TX queue in the HW */
792 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
794 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
795 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
796 struct ecore_hwfn *p_hwfn;
797 struct qede_tx_queue *txq;
801 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
802 txq = eth_dev->data->tx_queues[tx_queue_id];
804 if (qede_drain_txq(qdev, txq, true))
805 return -1; /* For the lack of retcodes */
807 hwfn_index = tx_queue_id % edev->num_hwfns;
808 p_hwfn = &edev->hwfns[hwfn_index];
809 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
810 if (rc != ECORE_SUCCESS) {
811 DP_ERR(edev, "TX queue %u stop failed\n", tx_queue_id);
814 qede_tx_queue_release_mbufs(txq);
815 qede_tx_queue_reset(qdev, txq);
816 eth_dev->data->tx_queue_state[tx_queue_id] =
817 RTE_ETH_QUEUE_STATE_STOPPED;
818 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
820 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
827 int qede_start_queues(struct rte_eth_dev *eth_dev)
829 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
834 rc = qede_rx_queue_start(eth_dev, id);
835 if (rc != ECORE_SUCCESS)
840 rc = qede_tx_queue_start(eth_dev, id);
841 if (rc != ECORE_SUCCESS)
848 void qede_stop_queues(struct rte_eth_dev *eth_dev)
850 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
853 /* Stopping RX/TX queues */
855 qede_tx_queue_stop(eth_dev, id);
859 qede_rx_queue_stop(eth_dev, id);
863 static inline bool qede_tunn_exist(uint16_t flag)
865 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
866 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
869 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
871 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
872 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
876 * qede_check_tunn_csum_l4:
878 * 1 : If L4 csum is enabled AND if the validation has failed.
881 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
883 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
884 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
885 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
886 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
891 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
893 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
894 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
895 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
896 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
901 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
902 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
904 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
905 struct ether_hdr *eth_hdr;
906 struct ipv4_hdr *ipv4_hdr;
907 struct ipv6_hdr *ipv6_hdr;
908 struct vlan_hdr *vlan_hdr;
910 bool vlan_tagged = 0;
913 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
914 len = sizeof(struct ether_hdr);
915 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
917 /* Note: Valid only if VLAN stripping is disabled */
918 if (ethertype == ETHER_TYPE_VLAN) {
920 vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
921 len += sizeof(struct vlan_hdr);
922 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
925 if (ethertype == ETHER_TYPE_IPv4) {
926 packet_type |= RTE_PTYPE_L3_IPV4;
927 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
928 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
929 packet_type |= RTE_PTYPE_L4_TCP;
930 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
931 packet_type |= RTE_PTYPE_L4_UDP;
932 } else if (ethertype == ETHER_TYPE_IPv6) {
933 packet_type |= RTE_PTYPE_L3_IPV6;
934 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
935 if (ipv6_hdr->proto == IPPROTO_TCP)
936 packet_type |= RTE_PTYPE_L4_TCP;
937 else if (ipv6_hdr->proto == IPPROTO_UDP)
938 packet_type |= RTE_PTYPE_L4_UDP;
942 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
944 packet_type |= RTE_PTYPE_L2_ETHER;
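/* This software parse exists because, for tunneled packets, the CQE only
 * describes the inner headers; the outer L2/L3/L4 types must be recovered
 * from the frame itself (valid only while VLAN stripping is disabled).
 */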
949 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
954 static const uint32_t
955 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
956 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
957 RTE_PTYPE_INNER_L2_ETHER,
958 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
959 RTE_PTYPE_INNER_L2_ETHER,
960 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
961 RTE_PTYPE_INNER_L4_TCP |
962 RTE_PTYPE_INNER_L2_ETHER,
963 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
964 RTE_PTYPE_INNER_L4_TCP |
965 RTE_PTYPE_INNER_L2_ETHER,
966 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
967 RTE_PTYPE_INNER_L4_UDP |
968 RTE_PTYPE_INNER_L2_ETHER,
969 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
970 RTE_PTYPE_INNER_L4_UDP |
971 RTE_PTYPE_INNER_L2_ETHER,
972 /* Frags with no VLAN */
973 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
974 RTE_PTYPE_INNER_L4_FRAG |
975 RTE_PTYPE_INNER_L2_ETHER,
976 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
977 RTE_PTYPE_INNER_L4_FRAG |
978 RTE_PTYPE_INNER_L2_ETHER,
980 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
981 RTE_PTYPE_INNER_L2_ETHER_VLAN,
982 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
983 RTE_PTYPE_INNER_L2_ETHER_VLAN,
984 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
985 RTE_PTYPE_INNER_L4_TCP |
986 RTE_PTYPE_INNER_L2_ETHER_VLAN,
987 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
988 RTE_PTYPE_INNER_L4_TCP |
989 RTE_PTYPE_INNER_L2_ETHER_VLAN,
990 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
991 RTE_PTYPE_INNER_L4_UDP |
992 RTE_PTYPE_INNER_L2_ETHER_VLAN,
993 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
994 RTE_PTYPE_INNER_L4_UDP |
995 RTE_PTYPE_INNER_L2_ETHER_VLAN,
996 /* Frags with VLAN */
997 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
998 RTE_PTYPE_INNER_L4_FRAG |
999 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1000 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1001 RTE_PTYPE_INNER_L4_FRAG |
1002 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1005 /* Bits (0..3) provide the L3/L4 protocol type */
1006 /* Bits (4,5) provide the frag and VLAN info */
1007 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1008 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1009 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1010 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1011 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1012 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1013 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1014 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1016 if (val < QEDE_PKT_TYPE_MAX)
1017 return ptype_lkup_tbl[val];
1019 return RTE_PTYPE_UNKNOWN;
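/* Example (assuming the L3 type sits in bits 0-1 and the L4 protocol in
 * bits 2-3 of the parsing flags): an inner IPv4 + TCP completion yields
 * val == QEDE_PKT_TYPE_IPV4_TCP, which the table above maps to
 * RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP.
 */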
1022 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1027 static const uint32_t
1028 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1029 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1030 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1031 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
1034 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
1037 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
1040 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1043 /* Frags with no VLAN */
1044 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1047 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1051 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1052 RTE_PTYPE_L2_ETHER_VLAN,
1053 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1054 RTE_PTYPE_L2_ETHER_VLAN,
1055 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1057 RTE_PTYPE_L2_ETHER_VLAN,
1058 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1060 RTE_PTYPE_L2_ETHER_VLAN,
1061 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1063 RTE_PTYPE_L2_ETHER_VLAN,
1064 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1066 RTE_PTYPE_L2_ETHER_VLAN,
1067 /* Frags with VLAN */
1068 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1070 RTE_PTYPE_L2_ETHER_VLAN,
1071 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1073 RTE_PTYPE_L2_ETHER_VLAN,
1076 /* Bits (0..3) provide the L3/L4 protocol type */
1077 /* Bits (4,5) provide the frag and VLAN info */
1078 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1079 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1080 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1081 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1082 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1083 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1084 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1085 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1087 if (val < QEDE_PKT_TYPE_MAX)
1088 return ptype_lkup_tbl[val];
1090 return RTE_PTYPE_UNKNOWN;
1093 static inline uint8_t
1094 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1096 struct ipv4_hdr *ip;
1101 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1102 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1104 if (unlikely(val)) {
1105 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1106 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1107 ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1108 sizeof(struct ether_hdr));
1109 pkt_csum = ip->hdr_checksum;
1110 ip->hdr_checksum = 0;
1111 calc_csum = rte_ipv4_cksum(ip);
1112 ip->hdr_checksum = pkt_csum;
1113 return (calc_csum != pkt_csum);
1114 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1121 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1123 ecore_chain_consume(&rxq->rx_bd_ring);
1128 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1129 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
1131 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1132 uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1133 struct qede_rx_entry *curr_prod;
1134 dma_addr_t new_mapping;
1136 curr_prod = &rxq->sw_rx_ring[idx];
1137 *curr_prod = *curr_cons;
1139 new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
1140 curr_prod->page_offset;
1142 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1143 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1149 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1150 struct qede_dev *qdev, uint8_t count)
1152 struct qede_rx_entry *curr_cons;
1154 for (; count > 0; count--) {
1155 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1156 qede_reuse_page(qdev, rxq, curr_cons);
1157 qede_rx_bd_ring_consume(rxq);
1162 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1163 struct qede_rx_queue *rxq,
1164 uint8_t agg_index, uint16_t len)
1166 struct qede_agg_info *tpa_info;
1167 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1170 /* Under certain conditions the FW may not consume an additional
1171 * or new BD, so the decision to consume the BD must be made
1172 * based on len_list[0].
1174 if (rte_le_to_cpu_16(len)) {
1175 tpa_info = &rxq->tpa_info[agg_index];
1176 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1177 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
1179 curr_frag->nb_segs = 1;
1180 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1181 curr_frag->data_len = curr_frag->pkt_len;
1182 tpa_info->tpa_tail->next = curr_frag;
1183 tpa_info->tpa_tail = curr_frag;
1184 qede_rx_bd_ring_consume(rxq);
1185 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1186 PMD_RX_LOG(ERR, rxq, "mbuf allocation failed\n");
1187 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1188 rxq->rx_alloc_errors++;
1194 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1195 struct qede_rx_queue *rxq,
1196 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1198 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1199 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1200 /* Only len_list[0] will have a value */
1201 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1206 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1207 struct qede_rx_queue *rxq,
1208 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1210 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1212 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1214 /* Update total length and frags based on end TPA */
1215 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1216 /* TODO: Add Sanity Checks */
1217 rx_mb->nb_segs = cqe->num_of_bds;
1218 rx_mb->pkt_len = cqe->total_packet_len;
1220 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1221 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1222 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1226 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1231 static const uint32_t
1232 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1233 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1234 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1235 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1236 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1237 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1238 RTE_PTYPE_TUNNEL_GENEVE,
1239 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1240 RTE_PTYPE_TUNNEL_GRE,
1241 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1242 RTE_PTYPE_TUNNEL_VXLAN,
1243 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1244 RTE_PTYPE_TUNNEL_GENEVE,
1245 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1246 RTE_PTYPE_TUNNEL_GRE,
1247 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1248 RTE_PTYPE_TUNNEL_VXLAN,
1249 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1250 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1251 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1252 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1253 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1254 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1255 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1256 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1257 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1258 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1259 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1260 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1261 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1262 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1263 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1264 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1265 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1266 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1267 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1268 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1269 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1270 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1271 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1272 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1275 /* Cover bits[4-0] to include tunn_type and next protocol */
1276 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1277 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1278 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1279 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1281 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1282 return ptype_tunn_lkup_tbl[val];
1284 return RTE_PTYPE_UNKNOWN;
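/* Example: a VXLAN completion whose tunnel flags decode to
 * QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN resolves to
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4 for the outer headers.
 */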
1288 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1289 uint8_t num_segs, uint16_t pkt_len)
1291 struct qede_rx_queue *rxq = p_rxq;
1292 struct qede_dev *qdev = rxq->qdev;
1293 register struct rte_mbuf *seg1 = NULL;
1294 register struct rte_mbuf *seg2 = NULL;
1295 uint16_t sw_rx_index;
1300 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1302 if (unlikely(!cur_size)) {
1303 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1304 " left for mapping jumbo\n", num_segs);
1305 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1308 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1309 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1310 qede_rx_bd_ring_consume(rxq);
1311 pkt_len -= cur_size;
1312 seg2->data_len = cur_size;
1322 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1324 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1327 PMD_RX_LOG(INFO, rxq,
1328 "len 0x%04x bf 0x%04x hash_val 0x%x"
1329 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1330 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1331 m->data_len, bitfield, m->hash.rss,
1332 (unsigned long)m->ol_flags,
1333 rte_get_ptype_l2_name(m->packet_type),
1334 rte_get_ptype_l3_name(m->packet_type),
1335 rte_get_ptype_l4_name(m->packet_type),
1336 rte_get_ptype_tunnel_name(m->packet_type),
1337 rte_get_ptype_inner_l2_name(m->packet_type),
1338 rte_get_ptype_inner_l3_name(m->packet_type),
1339 rte_get_ptype_inner_l4_name(m->packet_type));
1344 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1346 struct qede_rx_queue *rxq = p_rxq;
1347 struct qede_dev *qdev = rxq->qdev;
1348 struct ecore_dev *edev = &qdev->edev;
1349 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1350 uint16_t rx_pkt = 0;
1351 union eth_rx_cqe *cqe;
1352 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1353 register struct rte_mbuf *rx_mb = NULL;
1354 register struct rte_mbuf *seg1 = NULL;
1355 enum eth_rx_cqe_type cqe_type;
1356 uint16_t pkt_len = 0; /* Sum of all BD segments */
1357 uint16_t len; /* Length of first BD */
1358 uint8_t num_segs = 1;
1359 uint16_t preload_idx;
1360 uint16_t parse_flag;
1361 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1362 uint8_t bitfield_val;
1364 uint8_t tunn_parse_flag;
1366 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1368 uint32_t packet_type;
1371 uint8_t offset, tpa_agg_idx, flags;
1372 struct qede_agg_info *tpa_info = NULL;
1375 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1376 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1380 if (hw_comp_cons == sw_comp_cons)
1383 while (sw_comp_cons != hw_comp_cons) {
1385 packet_type = RTE_PTYPE_UNKNOWN;
1387 tpa_start_flg = false;
1390 /* Get the CQE from the completion ring */
1392 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1393 cqe_type = cqe->fast_path_regular.type;
1394 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1397 case ETH_RX_CQE_TYPE_REGULAR:
1398 fp_cqe = &cqe->fast_path_regular;
1400 case ETH_RX_CQE_TYPE_TPA_START:
1401 cqe_start_tpa = &cqe->fast_path_tpa_start;
1402 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1403 tpa_start_flg = true;
1404 /* Mark it as LRO packet */
1405 ol_flags |= PKT_RX_LRO;
1406 /* In split mode, seg_len is the same as len_on_first_bd
1407 * and ext_bd_len_list will be empty since there are
1408 * no additional buffers
1410 PMD_RX_LOG(INFO, rxq,
1411 "TPA start[%d] - len_on_first_bd %d header %d"
1412 " [bd_list[0] %d], [seg_len %d]\n",
1413 cqe_start_tpa->tpa_agg_index,
1414 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1415 cqe_start_tpa->header_len,
1416 rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1417 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1420 case ETH_RX_CQE_TYPE_TPA_CONT:
1421 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1422 &cqe->fast_path_tpa_cont);
1424 case ETH_RX_CQE_TYPE_TPA_END:
1425 qede_rx_process_tpa_end_cqe(qdev, rxq,
1426 &cqe->fast_path_tpa_end);
1427 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1428 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1429 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1431 case ETH_RX_CQE_TYPE_SLOW_PATH:
1432 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1433 ecore_eth_cqe_completion(
1434 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1435 (struct eth_slow_path_rx_cqe *)cqe);
1441 /* Get the data from the SW ring */
1442 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1443 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1444 assert(rx_mb != NULL);
1446 /* Handle regular CQE or TPA start CQE */
1447 if (!tpa_start_flg) {
1448 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1449 offset = fp_cqe->placement_offset;
1450 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1451 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1452 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1453 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1454 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1455 bitfield_val = fp_cqe->bitfields;
1459 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1460 offset = cqe_start_tpa->placement_offset;
1461 /* seg_len = len_on_first_bd */
1462 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1463 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1464 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1465 bitfield_val = cqe_start_tpa->bitfields;
1467 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1469 if (qede_tunn_exist(parse_flag)) {
1470 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1471 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1472 PMD_RX_LOG(ERR, rxq,
1473 "L4 csum failed, flags = 0x%x\n",
1475 rxq->rx_hw_errors++;
1476 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1478 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1481 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1482 PMD_RX_LOG(ERR, rxq,
1483 "Outer L3 csum failed, flags = 0x%x\n",
1485 rxq->rx_hw_errors++;
1486 ol_flags |= PKT_RX_EIP_CKSUM_BAD;
1488 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1492 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1494 flags = fp_cqe->tunnel_pars_flags.flags;
1495 tunn_parse_flag = flags;
1499 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1503 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1505 /* Outer L3/L4 types are not available in the CQE */
1506 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1508 /* Outer L3/L4 types are not available in the CQE.
1509 * Need to add the offset to parse them correctly,
1511 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1512 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1514 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1517 /* Common handling for non-tunnel packets and for inner
1518 * headers in the case of tunnel.
1520 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1521 PMD_RX_LOG(ERR, rxq,
1522 "L4 csum failed, flags = 0x%x\n",
1524 rxq->rx_hw_errors++;
1525 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1527 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1529 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1530 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1532 rxq->rx_hw_errors++;
1533 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1535 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1538 if (CQE_HAS_VLAN(parse_flag) ||
1539 CQE_HAS_OUTER_VLAN(parse_flag)) {
1540 /* Note: FW doesn't indicate Q-in-Q packet */
1541 ol_flags |= PKT_RX_VLAN;
1542 if (qdev->vlan_strip_flg) {
1543 ol_flags |= PKT_RX_VLAN_STRIPPED;
1544 rx_mb->vlan_tci = vlan_tci;
1549 if (qdev->rss_enable) {
1550 ol_flags |= PKT_RX_RSS_HASH;
1551 rx_mb->hash.rss = rss_hash;
1554 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1555 PMD_RX_LOG(ERR, rxq,
1556 "New buffer allocation failed,"
1557 "dropping incoming packet\n");
1558 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
1559 rte_eth_devices[rxq->port_id].
1560 data->rx_mbuf_alloc_failed++;
1561 rxq->rx_alloc_errors++;
1564 qede_rx_bd_ring_consume(rxq);
1566 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1567 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1568 " len on first: %04x Total Len: %04x",
1569 fp_cqe->bd_num, len, pkt_len);
1570 num_segs = fp_cqe->bd_num - 1;
1572 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1575 for (j = 0; j < num_segs; j++) {
1576 if (qede_alloc_rx_buffer(rxq)) {
1577 PMD_RX_LOG(ERR, rxq,
1578 "Buffer allocation failed");
1579 rte_eth_devices[rxq->port_id].
1580 data->rx_mbuf_alloc_failed++;
1581 rxq->rx_alloc_errors++;
1587 rxq->rx_segs++; /* for the first segment */
1589 /* Prefetch next mbuf while processing current one. */
1590 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1591 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1593 /* Update rest of the MBUF fields */
1594 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1595 rx_mb->port = rxq->port_id;
1596 rx_mb->ol_flags = ol_flags;
1597 rx_mb->data_len = len;
1598 rx_mb->packet_type = packet_type;
1599 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1600 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1602 if (!tpa_start_flg) {
1603 rx_mb->nb_segs = fp_cqe->bd_num;
1604 rx_mb->pkt_len = pkt_len;
1606 /* store ref to the updated mbuf */
1607 tpa_info->tpa_head = rx_mb;
1608 tpa_info->tpa_tail = tpa_info->tpa_head;
1610 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1612 if (!tpa_start_flg) {
1613 rx_pkts[rx_pkt] = rx_mb;
1617 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1618 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1619 if (rx_pkt == nb_pkts) {
1620 PMD_RX_LOG(DEBUG, rxq,
1621 "Budget reached nb_pkts=%u received=%u",
1627 qede_update_rx_prod(qdev, rxq);
1629 rxq->rcv_pkts += rx_pkt;
1631 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1637 /* Populate scatter gather buffer descriptor fields */
1638 static inline uint16_t
1639 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1640 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
1643 struct qede_tx_queue *txq = p_txq;
1644 struct eth_tx_bd *tx_bd = NULL;
1646 uint16_t nb_segs = 0;
1648 /* Check for scattered buffers */
1650 if (start_seg == 0) {
1652 *bd2 = (struct eth_tx_2nd_bd *)
1653 ecore_chain_produce(&txq->tx_pbl);
1654 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1657 mapping = rte_mbuf_data_iova(m_seg);
1658 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1659 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1660 } else if (start_seg == 1) {
1662 *bd3 = (struct eth_tx_3rd_bd *)
1663 ecore_chain_produce(&txq->tx_pbl);
1664 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1667 mapping = rte_mbuf_data_iova(m_seg);
1668 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1669 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1671 tx_bd = (struct eth_tx_bd *)
1672 ecore_chain_produce(&txq->tx_pbl);
1673 memset(tx_bd, 0, sizeof(*tx_bd));
1675 mapping = rte_mbuf_data_iova(m_seg);
1676 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1677 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1680 m_seg = m_seg->next;
1683 /* Return total scattered buffers */
1687 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1689 print_tx_bd_info(struct qede_tx_queue *txq,
1690 struct eth_tx_1st_bd *bd1,
1691 struct eth_tx_2nd_bd *bd2,
1692 struct eth_tx_3rd_bd *bd3,
1693 uint64_t tx_ol_flags)
1695 char ol_buf[256] = { 0 }; /* for verbose prints */
1698 PMD_TX_LOG(INFO, txq,
1699 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
1700 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1701 bd1->data.bd_flags.bitfields,
1702 rte_cpu_to_le_16(bd1->data.bitfields));
1704 PMD_TX_LOG(INFO, txq,
1705 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
1706 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
1707 bd2->data.bitfields2, bd2->data.tunn_ip_size);
1709 PMD_TX_LOG(INFO, txq,
1710 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
1711 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
1712 rte_cpu_to_le_16(bd3->nbytes),
1713 rte_cpu_to_le_16(bd3->data.bitfields),
1714 rte_cpu_to_le_16(bd3->data.lso_mss),
1715 bd3->data.tunn_l4_hdr_start_offset_w,
1716 bd3->data.tunn_hdr_size_w);
1718 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1719 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1723 /* TX prepare to check that packets meet TX conditions */
1725 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1726 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1729 struct qede_tx_queue *txq = p_txq;
1731 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1738 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1742 for (i = 0; i < nb_pkts; i++) {
1744 ol_flags = m->ol_flags;
1745 if (ol_flags & PKT_TX_TCP_SEG) {
1746 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1747 rte_errno = EINVAL;
1750 /* TBD: confirm it's ~9700B for both? */
1751 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1752 rte_errno = EINVAL;
1756 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1757 rte_errno = EINVAL;
1761 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1762 rte_errno = ENOTSUP;
1766 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1767 ret = rte_validate_tx_offload(m);
1775 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1776 if (unlikely(i != nb_pkts))
1777 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
1783 #define MPLSINUDP_HDR_SIZE (12)
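/* 12 bytes = 8-byte UDP header + one 4-byte MPLS label stack entry;
 * only single-label MPLS-in-UDP is handled below.
 */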
1785 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1787 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
1788 struct qede_tx_queue *txq)
1790 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
1791 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
1792 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
1793 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
1794 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
1795 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
1796 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
1797 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
1798 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
1799 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
1800 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
1805 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1807 struct qede_tx_queue *txq = p_txq;
1808 struct qede_dev *qdev = txq->qdev;
1809 struct ecore_dev *edev = &qdev->edev;
1810 struct rte_mbuf *mbuf;
1811 struct rte_mbuf *m_seg = NULL;
1812 uint16_t nb_tx_pkts;
1816 uint16_t nb_pkt_sent = 0;
1820 __rte_unused bool tunn_flg;
1821 bool tunn_ipv6_ext_flg;
1822 struct eth_tx_1st_bd *bd1;
1823 struct eth_tx_2nd_bd *bd2;
1824 struct eth_tx_3rd_bd *bd3;
1825 uint64_t tx_ol_flags;
1829 uint8_t bd1_bd_flags_bf;
1838 uint8_t tunn_l4_hdr_start_offset;
1839 uint8_t tunn_hdr_size;
1840 uint8_t inner_l2_hdr_size;
1841 uint16_t inner_l4_hdr_offset;
1843 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1844 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
1845 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1846 qede_process_tx_compl(edev, txq);
1849 nb_tx_pkts = nb_pkts;
1850 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1851 while (nb_tx_pkts--) {
1852 /* Init flags/values */
1862 bd1_bd_flags_bf = 0;
1867 mplsoudp_flg = false;
1868 tunn_ipv6_ext_flg = false;
1870 tunn_l4_hdr_start_offset = 0;
1875 /* Check the packet's minimum TX BD requirement against available BDs */
1876 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
1879 tx_ol_flags = mbuf->ol_flags;
1880 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1882 /* TX prepare would have already checked supported tunnel Tx
1883 * offloads. Don't rely on pkt_type marked by Rx, instead use
1884 * tx_ol_flags to decide.
1886 tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
1889 /* Check against max which is Tunnel IPv6 + ext */
1890 if (unlikely(txq->nb_tx_avail <
1891 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
1894 /* First indicate its a tunnel pkt */
1895 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
1896 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1897 /* Legacy FW had flipped behavior in regard to this bit
1898 * i.e. it needed to be set to prevent the FW from touching
1899 * encapsulated packets when it didn't need to.
1901 if (unlikely(txq->is_legacy)) {
1903 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1906 /* Outer IP checksum offload */
1907 if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
1908 PKT_TX_OUTER_IPV4)) {
1910 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
1911 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1915 * Currently, only inner checksum offload in MPLS-in-UDP
1916 * tunnel with one MPLS label is supported. Both outer
1917 * and inner layer lengths need to be provided in
1920 if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1921 PKT_TX_TUNNEL_MPLSINUDP) {
1922 mplsoudp_flg = true;
1923 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1924 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1926 /* Outer L4 offset in two byte words */
1927 tunn_l4_hdr_start_offset =
1928 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
1929 /* Tunnel header size in two byte words */
1930 tunn_hdr_size = (mbuf->outer_l2_len +
1931 mbuf->outer_l3_len +
1932 MPLSINUDP_HDR_SIZE) / 2;
1933 /* Inner L2 header size in two byte words */
1934 inner_l2_hdr_size = (mbuf->l2_len -
1935 MPLSINUDP_HDR_SIZE) / 2;
1936 /* Inner L4 header offset from the beginning
1937 * of the inner packet in two-byte words
1939 inner_l4_hdr_offset = (mbuf->l2_len -
1940 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
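/* Illustration (hypothetical lengths): outer_l2_len 14, outer_l3_len 20,
 * l2_len 26 and l3_len 20 give tunn_l4_hdr_start_offset 17,
 * tunn_hdr_size 23, inner_l2_hdr_size 7 and inner_l4_hdr_offset 17,
 * all in two-byte words.
 */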
1942 /* Inner L2 size and address type */
1943 bd2_bf1 |= (inner_l2_hdr_size &
1944 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
1945 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
1946 bd2_bf1 |= (UNICAST_ADDRESS &
1947 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
1948 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
1949 /* Treated as IPv6+Ext */
1951 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
1953 /* Mark inner IPv6 if present */
1954 if (tx_ol_flags & PKT_TX_IPV6)
1956 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
1958 /* Inner L4 offsets */
1959 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
1960 (tx_ol_flags & (PKT_TX_UDP_CKSUM |
1961 PKT_TX_TCP_CKSUM))) {
1962 /* Determines if BD3 is needed */
1963 tunn_ipv6_ext_flg = true;
1964 if ((tx_ol_flags & PKT_TX_L4_MASK) ==
1967 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
1970 /* TODO other pseudo checksum modes are
1974 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
1975 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
1976 bd2_bf2 |= (inner_l4_hdr_offset &
1977 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
1978 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
1980 } /* End MPLSoUDP */
1981 } /* End Tunnel handling */
1983 if (tx_ol_flags & PKT_TX_TCP_SEG) {
1985 if (unlikely(txq->nb_tx_avail <
1986 ETH_TX_MIN_BDS_PER_LSO_PKT))
1988 /* For LSO, the packet header and payload must reside in
1989 * buffers pointed to by different BDs. Using BD1 for HDR
1990 * and BD2 onwards for data.
1992 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
1994 hdr_size += mbuf->outer_l2_len +
1997 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
1999 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2000 /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
2002 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2003 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2004 /* Using one header BD */
2005 bd3_bf |= rte_cpu_to_le_16(1 <<
2006 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2008 if (unlikely(txq->nb_tx_avail <
2009 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2012 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2013 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2016 /* Descriptor based VLAN insertion */
2017 if (tx_ol_flags & PKT_TX_VLAN_PKT) {
2018 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2020 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2023 /* Offload the IP checksum in the hardware */
2024 if (tx_ol_flags & PKT_TX_IP_CKSUM) {
2026 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2027 /* There's no DPDK flag to request outer-L4 csum
2028 * offload. But in the case of tunnel if inner L3 or L4
2029 * csum offload is requested then we need to force
2030 * recalculation of L4 tunnel header csum also.
2032 if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
2033 PKT_TX_TUNNEL_GRE)) {
2035 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2036 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2040 /* L4 checksum offload (tcp or udp) */
2041 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2042 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
2044 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2045 /* There's no DPDK flag to request outer-L4 csum
2046 * offload. But in the case of tunnel if inner L3 or L4
2047 * csum offload is requested then we need to force
2048 * recalculation of L4 tunnel header csum also.
2052 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2053 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2057 /* Fill the entry in the SW ring and the BDs in the FW ring */
2059 txq->sw_tx_ring[idx].mbuf = mbuf;
2062 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2063 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2066 /* Map MBUF linear data for DMA and set in the BD1 */
2067 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2069 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2070 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2071 bd1->data.vlan = vlan;
2073 if (lso_flg || mplsoudp_flg) {
2074 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2076 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2080 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2083 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2084 rte_mbuf_data_iova(mbuf)),
2085 mbuf->data_len - hdr_size);
2086 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2088 bd2->data.bitfields2 =
2089 rte_cpu_to_le_16(bd2_bf2);
2091 bd2->data.tunn_ip_size =
2092 rte_cpu_to_le_16(mbuf->outer_l3_len);
2095 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2096 bd3 = (struct eth_tx_3rd_bd *)
2097 ecore_chain_produce(&txq->tx_pbl);
2098 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2100 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2102 bd3->data.lso_mss = mss;
2104 bd3->data.tunn_l4_hdr_start_offset_w =
2105 tunn_l4_hdr_start_offset;
2106 bd3->data.tunn_hdr_size_w =
2112 /* Handle fragmented MBUF */
2115 /* Encode scatter gather buffer descriptors if required */
2116 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2117 bd1->data.nbds = nbds + nb_frags;
2119 txq->nb_tx_avail -= bd1->data.nbds;
2121 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
2123 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2124 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2125 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2131 /* Write value of prod idx into bd_prod */
2132 txq->tx_db.data.bd_prod = bd_prod;
2134 rte_compiler_barrier();
2135 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2138 /* Check again for Tx completions */
2139 qede_process_tx_compl(edev, txq);
2141 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2142 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2148 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
2149 __rte_unused struct rte_mbuf **pkts,
2150 __rte_unused uint16_t nb_pkts)
2156 /* This function does a dry walk through the completion queue
2157 * to calculate the number of BDs used by the HW.
2158 * At the end, it restores the state of the completion queue.
2161 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2163 uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2164 union eth_rx_cqe *cqe, *orig_cqe = NULL;
2166 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2167 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2169 if (hw_comp_cons == sw_comp_cons)
2172 /* Get the CQE from the completion ring */
2173 cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2176 while (sw_comp_cons != hw_comp_cons) {
2177 switch (cqe->fast_path_regular.type) {
2178 case ETH_RX_CQE_TYPE_REGULAR:
2179 bd_count += cqe->fast_path_regular.bd_num;
2181 case ETH_RX_CQE_TYPE_TPA_END:
2182 bd_count += cqe->fast_path_tpa_end.num_of_bds;
2189 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2190 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2193 /* revert comp_ring to original state */
2194 ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
2200 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2202 uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2203 uint16_t produced, consumed;
2204 struct qede_rx_queue *rxq = p_rxq;
2206 if (offset > rxq->nb_rx_desc)
2209 sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2210 sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2212 /* find BDs used by HW from completion queue elements */
2213 hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
2215 if (hw_bd_cons < sw_bd_cons)
2216 /* wraparound case */
2217 consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2219 consumed = hw_bd_cons - sw_bd_cons;
2221 if (offset <= consumed)
2222 return RTE_ETH_RX_DESC_DONE;
2224 if (sw_bd_prod < sw_bd_cons)
2225 /* wraparound case */
2226 produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2228 produced = sw_bd_prod - sw_bd_cons;
2230 if (offset <= produced)
2231 return RTE_ETH_RX_DESC_AVAIL;
2233 return RTE_ETH_RX_DESC_UNAVAIL;
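/* Usage sketch (illustrative): this handler backs the generic ethdev call
 * rte_eth_rx_descriptor_status(port_id, queue_id, offset), which reports
 * RTE_ETH_RX_DESC_DONE for descriptors the HW has filled, _AVAIL for those
 * it may still fill, and _UNAVAIL otherwise.
 */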