1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
10 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
12 struct rte_mbuf *new_mb = NULL;
13 struct eth_rx_bd *rx_bd;
15 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
17 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
18 if (unlikely(!new_mb)) {
20 "Failed to allocate rx buffer "
21 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
22 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
23 rte_mempool_avail_count(rxq->mb_pool),
24 rte_mempool_in_use_count(rxq->mb_pool));
27 rxq->sw_rx_ring[idx] = new_mb;
28 mapping = rte_mbuf_data_iova_default(new_mb);
29 /* Advance PROD and get BD pointer */
30 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
31 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
32 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
37 #define QEDE_MAX_BULK_ALLOC_COUNT 512
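/*
 * Illustrative note (not part of the original sources): the bulk-allocation
 * helper below clamps the request twice - first to QEDE_MAX_BULK_ALLOC_COUNT,
 * then to the number of slots left before sw_rx_ring wraps. For example,
 * assuming NUM_RX_BDS(rxq) evaluates to an index mask of 1023 and sw_rx_prod
 * currently maps to index 1000, a request for 512 mbufs is reduced to
 * 1023 - 1000 + 1 = 24 so that rte_mempool_get_bulk() only fills contiguous
 * ring entries.
 */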
39 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
41 struct rte_mbuf *mbuf = NULL;
42 struct eth_rx_bd *rx_bd;
46 uint16_t mask = NUM_RX_BDS(rxq);
48 if (count > QEDE_MAX_BULK_ALLOC_COUNT)
49 count = QEDE_MAX_BULK_ALLOC_COUNT;
51 idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
53 if (count > mask - idx + 1)
54 count = mask - idx + 1;
56 ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
61 "Failed to allocate %d rx buffers "
62 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
64 rxq->sw_rx_prod & NUM_RX_BDS(rxq),
65 rxq->sw_rx_cons & NUM_RX_BDS(rxq),
66 rte_mempool_avail_count(rxq->mb_pool),
67 rte_mempool_in_use_count(rxq->mb_pool));
71 for (i = 0; i < count; i++) {
72 rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
73 mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
75 mapping = rte_mbuf_data_iova_default(mbuf);
76 rx_bd = (struct eth_rx_bd *)
77 ecore_chain_produce(&rxq->rx_bd_ring);
78 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
79 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
82 rxq->sw_rx_prod = idx;
87 /* Criteria for calculating Rx buffer size -
88 * 1) rx_buf_size should not exceed the size of mbuf
89 * 2) In scattered_rx mode - minimum rx_buf_size should be
90 * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
91 * 3) In regular mode - minimum rx_buf_size should be
92 * (MTU + Maximum L2 Header Size + 2)
93  * In the above cases, +2 corresponds to 2 bytes of padding in front of the L2 header.
95  * 4) rx_buf_size should be cacheline-size aligned. Considering criterion 1,
96  * round the size down (floor) rather than up (ceil) so that the aligned
97  * rx_buf_size never exceeds the mbuf size.
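 *
 * Worked example (illustrative figures, assuming ETH_RX_MAX_BUFF_PER_PKT is
 * 5): with mbufsz = 2048 and (max_frame_size + QEDE_ETH_OVERHEAD) = 9050 in
 * scattered mode, the per-BD minimum is 9050 / 5 = 1810; RTE_MAX() keeps the
 * larger 2048, and flooring to the cache-line size leaves 2048, so the BD
 * size never exceeds the mbuf data room.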
100 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
101 uint16_t max_frame_size)
103 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
104 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
107 if (dev->data->scattered_rx) {
108 		/* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers can be
109 		 * used for a single packet, so make sure the mbuf size is
110 		 * sufficient for this.
112 if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
113 (max_frame_size + QEDE_ETH_OVERHEAD)) {
114 			DP_ERR(edev, "mbuf size %d is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
115 mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
119 rx_buf_size = RTE_MAX(mbufsz,
120 (max_frame_size + QEDE_ETH_OVERHEAD) /
121 ETH_RX_MAX_BUFF_PER_PKT);
123 rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
126 /* Align to cache-line size if needed */
127 return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
130 static struct qede_rx_queue *
131 qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
134 unsigned int socket_id,
135 struct rte_mempool *mp,
138 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
139 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
140 struct qede_rx_queue *rxq;
144 /* First allocate the rx queue data structure */
145 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
146 RTE_CACHE_LINE_SIZE, socket_id);
149 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
156 rxq->nb_rx_desc = nb_desc;
157 rxq->queue_id = queue_idx;
158 rxq->port_id = dev->data->port_id;
161 rxq->rx_buf_size = bufsz;
163 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
164 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
166 /* Allocate the parallel driver ring for Rx buffers */
167 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
168 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
169 RTE_CACHE_LINE_SIZE, socket_id);
170 if (!rxq->sw_rx_ring) {
171 		DP_ERR(edev, "Memory allocation failed for sw_rx_ring on"
172 " socket %u\n", socket_id);
177 /* Allocate FW Rx ring */
178 rc = qdev->ops->common->chain_alloc(edev,
179 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
180 ECORE_CHAIN_MODE_NEXT_PTR,
181 ECORE_CHAIN_CNT_TYPE_U16,
183 sizeof(struct eth_rx_bd),
187 if (rc != ECORE_SUCCESS) {
188 		DP_ERR(edev, "Memory allocation failed for RX BD ring"
189 " on socket %u\n", socket_id);
190 rte_free(rxq->sw_rx_ring);
195 /* Allocate FW completion ring */
196 rc = qdev->ops->common->chain_alloc(edev,
197 ECORE_CHAIN_USE_TO_CONSUME,
198 ECORE_CHAIN_MODE_PBL,
199 ECORE_CHAIN_CNT_TYPE_U16,
201 sizeof(union eth_rx_cqe),
205 if (rc != ECORE_SUCCESS) {
206 		DP_ERR(edev, "Memory allocation failed for RX CQE ring"
207 " on socket %u\n", socket_id);
208 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
209 rte_free(rxq->sw_rx_ring);
218 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
219 uint16_t nb_desc, unsigned int socket_id,
220 __rte_unused const struct rte_eth_rxconf *rx_conf,
221 struct rte_mempool *mp)
223 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
224 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
225 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
226 struct qede_rx_queue *rxq;
227 uint16_t max_rx_pktlen;
231 PMD_INIT_FUNC_TRACE(edev);
233 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
234 if (!rte_is_power_of_2(nb_desc)) {
235 DP_ERR(edev, "Ring size %u is not power of 2\n",
240 /* Free memory prior to re-allocation if needed... */
241 if (dev->data->rx_queues[qid] != NULL) {
242 qede_rx_queue_release(dev->data->rx_queues[qid]);
243 dev->data->rx_queues[qid] = NULL;
246 max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
248 /* Fix up RX buffer size */
249 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
250 /* cache align the mbuf size to simplify rx_buf_size calculation */
251 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
252 if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
253 (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
254 if (!dev->data->scattered_rx) {
255 DP_INFO(edev, "Forcing scatter-gather mode\n");
256 dev->data->scattered_rx = 1;
260 rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
266 if (ECORE_IS_CMT(edev)) {
267 rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
268 socket_id, mp, bufsz);
272 qdev->fp_array[qid * 2].rxq = rxq;
273 rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
274 socket_id, mp, bufsz);
278 qdev->fp_array[qid * 2 + 1].rxq = rxq;
279 /* provide per engine fp struct as rx queue */
280 dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
282 rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
283 socket_id, mp, bufsz);
287 dev->data->rx_queues[qid] = rxq;
288 qdev->fp_array[qid].rxq = rxq;
291 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
292 qid, nb_desc, rxq->rx_buf_size, socket_id);
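/*
 * Hypothetical usage sketch (illustrative values; port_id and mbuf_pool are
 * placeholders, not names from this file): applications reach this setup
 * path through the ethdev API, e.g.
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, mbuf_pool);
 *
 * nb_desc (1024 here) must be a power of 2, otherwise the check above
 * rejects the queue.
 */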
298 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
299 struct qede_rx_queue *rxq)
301 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
302 ecore_chain_reset(&rxq->rx_bd_ring);
303 ecore_chain_reset(&rxq->rx_comp_ring);
306 *rxq->hw_cons_ptr = 0;
309 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
313 if (rxq->sw_rx_ring) {
314 for (i = 0; i < rxq->nb_rx_desc; i++) {
315 if (rxq->sw_rx_ring[i]) {
316 rte_pktmbuf_free(rxq->sw_rx_ring[i]);
317 rxq->sw_rx_ring[i] = NULL;
323 static void _qede_rx_queue_release(struct qede_dev *qdev,
324 struct ecore_dev *edev,
325 struct qede_rx_queue *rxq)
327 qede_rx_queue_release_mbufs(rxq);
328 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
329 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
330 rte_free(rxq->sw_rx_ring);
334 void qede_rx_queue_release(void *rx_queue)
336 struct qede_rx_queue *rxq = rx_queue;
337 struct qede_fastpath_cmt *fp_cmt;
338 struct qede_dev *qdev;
339 struct ecore_dev *edev;
343 edev = QEDE_INIT_EDEV(qdev);
344 PMD_INIT_FUNC_TRACE(edev);
345 if (ECORE_IS_CMT(edev)) {
347 _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
348 _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
350 _qede_rx_queue_release(qdev, edev, rxq);
355 /* Stops a given RX queue in the HW */
356 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
358 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
359 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
360 struct ecore_hwfn *p_hwfn;
361 struct qede_rx_queue *rxq;
365 if (rx_queue_id < qdev->num_rx_queues) {
366 rxq = qdev->fp_array[rx_queue_id].rxq;
367 hwfn_index = rx_queue_id % edev->num_hwfns;
368 p_hwfn = &edev->hwfns[hwfn_index];
369 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
371 if (rc != ECORE_SUCCESS) {
372 			DP_ERR(edev, "RX queue %u stop failed\n", rx_queue_id);
375 qede_rx_queue_release_mbufs(rxq);
376 qede_rx_queue_reset(qdev, rxq);
377 eth_dev->data->rx_queue_state[rx_queue_id] =
378 RTE_ETH_QUEUE_STATE_STOPPED;
379 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
381 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
388 static struct qede_tx_queue *
389 qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
392 unsigned int socket_id,
393 const struct rte_eth_txconf *tx_conf)
395 struct qede_dev *qdev = dev->data->dev_private;
396 struct ecore_dev *edev = &qdev->edev;
397 struct qede_tx_queue *txq;
399 size_t sw_tx_ring_size;
401 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
402 RTE_CACHE_LINE_SIZE, socket_id);
406 "Unable to allocate memory for txq on socket %u",
411 txq->nb_tx_desc = nb_desc;
413 txq->port_id = dev->data->port_id;
415 rc = qdev->ops->common->chain_alloc(edev,
416 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
417 ECORE_CHAIN_MODE_PBL,
418 ECORE_CHAIN_CNT_TYPE_U16,
420 sizeof(union eth_tx_bd_types),
423 if (rc != ECORE_SUCCESS) {
425 "Unable to allocate memory for txbd ring on socket %u",
427 qede_tx_queue_release(txq);
431 /* Allocate software ring */
432 	sw_tx_ring_size = sizeof(*txq->sw_tx_ring) * txq->nb_tx_desc;
433 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
435 RTE_CACHE_LINE_SIZE, socket_id);
437 if (!txq->sw_tx_ring) {
439 			"Unable to allocate memory for sw tx ring on socket %u",
441 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
442 qede_tx_queue_release(txq);
446 txq->queue_id = queue_idx;
448 txq->nb_tx_avail = txq->nb_tx_desc;
450 txq->tx_free_thresh =
451 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
452 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
455 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
456 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
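/*
 * Illustrative default (assumes QEDE_DEFAULT_TX_FREE_THRESH is 32, which is
 * an assumption, not taken from this file): with nb_desc = 1024 and
 * tx_conf->tx_free_thresh = 0, tx_free_thresh becomes 1024 - 32 = 992, so
 * the transmit path reclaims completed BDs once fewer than 992 descriptors
 * remain available.
 */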
461 qede_tx_queue_setup(struct rte_eth_dev *dev,
464 unsigned int socket_id,
465 const struct rte_eth_txconf *tx_conf)
467 struct qede_dev *qdev = dev->data->dev_private;
468 struct ecore_dev *edev = &qdev->edev;
469 struct qede_tx_queue *txq;
471 PMD_INIT_FUNC_TRACE(edev);
473 if (!rte_is_power_of_2(nb_desc)) {
474 DP_ERR(edev, "Ring size %u is not power of 2\n",
479 /* Free memory prior to re-allocation if needed... */
480 if (dev->data->tx_queues[queue_idx] != NULL) {
481 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
482 dev->data->tx_queues[queue_idx] = NULL;
485 if (ECORE_IS_CMT(edev)) {
486 txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
491 qdev->fp_array[queue_idx * 2].txq = txq;
492 txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
497 qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
498 dev->data->tx_queues[queue_idx] =
499 &qdev->fp_array_cmt[queue_idx];
501 txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
506 dev->data->tx_queues[queue_idx] = txq;
507 qdev->fp_array[queue_idx].txq = txq;
514 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
515 struct qede_tx_queue *txq)
517 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
518 ecore_chain_reset(&txq->tx_pbl);
521 *txq->hw_cons_ptr = 0;
524 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
528 if (txq->sw_tx_ring) {
529 for (i = 0; i < txq->nb_tx_desc; i++) {
530 if (txq->sw_tx_ring[i]) {
531 rte_pktmbuf_free(txq->sw_tx_ring[i]);
532 txq->sw_tx_ring[i] = NULL;
538 static void _qede_tx_queue_release(struct qede_dev *qdev,
539 struct ecore_dev *edev,
540 struct qede_tx_queue *txq)
542 qede_tx_queue_release_mbufs(txq);
543 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
544 rte_free(txq->sw_tx_ring);
548 void qede_tx_queue_release(void *tx_queue)
550 struct qede_tx_queue *txq = tx_queue;
551 struct qede_fastpath_cmt *fp_cmt;
552 struct qede_dev *qdev;
553 struct ecore_dev *edev;
557 edev = QEDE_INIT_EDEV(qdev);
558 PMD_INIT_FUNC_TRACE(edev);
560 if (ECORE_IS_CMT(edev)) {
562 _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
563 _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
565 _qede_tx_queue_release(qdev, edev, txq);
570 /* This function allocates fast-path status block memory */
572 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
575 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
576 struct status_block *sb_virt;
580 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
581 sizeof(struct status_block));
583 DP_ERR(edev, "Status block allocation failed\n");
586 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
589 DP_ERR(edev, "Status block initialization failed\n");
590 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
591 sizeof(struct status_block));
598 int qede_alloc_fp_resc(struct qede_dev *qdev)
600 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
601 struct qede_fastpath *fp;
606 PMD_INIT_FUNC_TRACE(edev);
609 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
611 num_sbs = ecore_cxt_get_proto_cid_count
612 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
615 DP_ERR(edev, "No status blocks available\n");
619 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
620 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
622 if (!qdev->fp_array) {
623 DP_ERR(edev, "fp array allocation failed\n");
627 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
628 sizeof(*qdev->fp_array));
630 if (ECORE_IS_CMT(edev)) {
631 qdev->fp_array_cmt = rte_calloc("fp_cmt",
632 QEDE_RXTX_MAX(qdev) / 2,
633 sizeof(*qdev->fp_array_cmt),
634 RTE_CACHE_LINE_SIZE);
636 if (!qdev->fp_array_cmt) {
637 DP_ERR(edev, "fp array for CMT allocation failed\n");
641 memset((void *)qdev->fp_array_cmt, 0,
642 (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
644 /* Establish the mapping of fp_array with fp_array_cmt */
645 for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
646 qdev->fp_array_cmt[i].qdev = qdev;
647 qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
648 qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
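	/*
	 * Illustrative mapping (not part of the original sources): on a CMT
	 * (two-engine 100G) device, logical queue pair i is backed by two
	 * hardware fastpaths, e.g. fp_array_cmt[3].fp0 points at fp_array[6]
	 * (engine 0) and fp_array_cmt[3].fp1 at fp_array[7] (engine 1).
	 */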
652 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
653 fp = &qdev->fp_array[sb_idx];
654 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
655 RTE_CACHE_LINE_SIZE);
657 			DP_ERR(edev, "FP sb_info allocation failed\n");
660 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
661 			DP_ERR(edev, "FP status block allocation failed\n");
664 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
665 fp->sb_info->igu_sb_id);
671 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
673 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
674 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
675 struct qede_fastpath *fp;
679 PMD_INIT_FUNC_TRACE(edev);
681 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
682 fp = &qdev->fp_array[sb_idx];
684 DP_INFO(edev, "Free sb_info index 0x%x\n",
685 fp->sb_info->igu_sb_id);
686 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
687 fp->sb_info->sb_phys,
688 sizeof(struct status_block));
689 rte_free(fp->sb_info);
694 /* Free packet buffers and ring memories */
695 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
696 if (eth_dev->data->rx_queues[i]) {
697 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
698 eth_dev->data->rx_queues[i] = NULL;
702 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
703 if (eth_dev->data->tx_queues[i]) {
704 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
705 eth_dev->data->tx_queues[i] = NULL;
709 rte_free(qdev->fp_array);
710 qdev->fp_array = NULL;
712 rte_free(qdev->fp_array_cmt);
713 qdev->fp_array_cmt = NULL;
717 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
718 struct qede_rx_queue *rxq)
720 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
721 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
722 struct eth_rx_prod_data rx_prods;
724 /* Update producers */
725 memset(&rx_prods, 0, sizeof(rx_prods));
726 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
727 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
729 /* Make sure that the BD and SGE data is updated before updating the
730 * producers since FW might read the BD/SGE right after the producer
735 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
736 (uint32_t *)&rx_prods);
738 /* mmiowb is needed to synchronize doorbell writes from more than one
739 * processor. It guarantees that the write arrives to the device before
740 * the napi lock is released and another qede_poll is called (possibly
741 * on another CPU). Without this barrier, the next doorbell can bypass
742 * this doorbell. This is applicable to IA64/Altix systems.
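	 *
	 * Illustrative ordering only (an assumption about the usual DPDK
	 * pattern, not a claim about the exact calls used here): write the
	 * BDs/SGEs, issue a write barrier such as rte_wmb(), update the
	 * producer pair via internal_ram_wr(), then barrier again before any
	 * subsequent doorbell write.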
746 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
749 /* Starts a given RX queue in HW */
751 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
753 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
754 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
755 struct ecore_queue_start_common_params params;
756 struct ecore_rxq_start_ret_params ret_params;
757 struct qede_rx_queue *rxq;
758 struct qede_fastpath *fp;
759 struct ecore_hwfn *p_hwfn;
760 dma_addr_t p_phys_table;
766 if (rx_queue_id < qdev->num_rx_queues) {
767 fp = &qdev->fp_array[rx_queue_id];
769 /* Allocate buffers for the Rx ring */
770 for (j = 0; j < rxq->nb_rx_desc; j++) {
771 rc = qede_alloc_rx_buffer(rxq);
773 DP_ERR(edev, "RX buffer allocation failed"
774 " for rxq = %u\n", rx_queue_id);
778 /* disable interrupts */
779 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
781 memset(¶ms, 0, sizeof(params));
782 params.queue_id = rx_queue_id / edev->num_hwfns;
784 params.stats_id = params.vport_id;
785 params.p_sb = fp->sb_info;
786 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
787 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
788 params.sb_idx = RX_PI;
789 hwfn_index = rx_queue_id % edev->num_hwfns;
790 p_hwfn = &edev->hwfns[hwfn_index];
791 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
792 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
793 memset(&ret_params, 0, sizeof(ret_params));
794 rc = ecore_eth_rx_queue_start(p_hwfn,
795 p_hwfn->hw_info.opaque_fid,
796 ¶ms, fp->rxq->rx_buf_size,
797 fp->rxq->rx_bd_ring.p_phys_addr,
798 p_phys_table, page_cnt,
801 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
805 /* Update with the returned parameters */
806 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
807 fp->rxq->handle = ret_params.p_handle;
809 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
810 qede_update_rx_prod(qdev, fp->rxq);
811 eth_dev->data->rx_queue_state[rx_queue_id] =
812 RTE_ETH_QUEUE_STATE_STARTED;
813 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
815 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
823 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
825 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
826 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
827 struct ecore_queue_start_common_params params;
828 struct ecore_txq_start_ret_params ret_params;
829 struct ecore_hwfn *p_hwfn;
830 dma_addr_t p_phys_table;
831 struct qede_tx_queue *txq;
832 struct qede_fastpath *fp;
837 if (tx_queue_id < qdev->num_tx_queues) {
838 fp = &qdev->fp_array[tx_queue_id];
840 memset(¶ms, 0, sizeof(params));
841 params.queue_id = tx_queue_id / edev->num_hwfns;
843 params.stats_id = params.vport_id;
844 params.p_sb = fp->sb_info;
845 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
846 fp->txq->queue_id, fp->sb_info->igu_sb_id);
847 params.sb_idx = TX_PI(0); /* tc = 0 */
848 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
849 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
850 hwfn_index = tx_queue_id % edev->num_hwfns;
851 p_hwfn = &edev->hwfns[hwfn_index];
852 if (qdev->dev_info.is_legacy)
853 fp->txq->is_legacy = true;
854 rc = ecore_eth_tx_queue_start(p_hwfn,
855 p_hwfn->hw_info.opaque_fid,
857 p_phys_table, page_cnt,
859 if (rc != ECORE_SUCCESS) {
860 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
864 txq->doorbell_addr = ret_params.p_doorbell;
865 txq->handle = ret_params.p_handle;
867 txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
868 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
870 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
872 SET_FIELD(txq->tx_db.data.params,
873 ETH_DB_DATA_AGG_VAL_SEL,
874 DQ_XCM_ETH_TX_BD_PROD_CMD);
875 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
876 eth_dev->data->tx_queue_state[tx_queue_id] =
877 RTE_ETH_QUEUE_STATE_STARTED;
878 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
880 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
888 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
889 struct qede_tx_queue *txq)
895 struct rte_mbuf *mbuf;
900 rte_compiler_barrier();
901 rte_prefetch0(txq->hw_cons_ptr);
902 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
903 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
904 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
905 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
906 abs(hw_bd_cons - sw_tx_cons));
909 mask = NUM_TX_BDS(txq);
910 idx = txq->sw_tx_cons & mask;
912 remaining = hw_bd_cons - sw_tx_cons;
913 txq->nb_tx_avail += remaining;
917 mbuf = txq->sw_tx_ring[idx];
919 nb_segs = mbuf->nb_segs;
920 remaining -= nb_segs;
922 /* Prefetch the next mbuf. Note that at least the last 4 mbufs
923 * that are prefetched will not be used in the current call.
925 rte_mbuf_prefetch_part1(txq->sw_tx_ring[(idx + 4) & mask]);
926 rte_mbuf_prefetch_part2(txq->sw_tx_ring[(idx + 4) & mask]);
928 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
931 ecore_chain_consume(&txq->tx_pbl);
935 idx = (idx + 1) & mask;
936 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
938 txq->sw_tx_cons = idx;
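	/*
	 * Worked example for the wrap-around free below (illustrative
	 * numbers): with NUM_TX_BDS() == 511, first_idx == 500 and idx == 10,
	 * the completed mbufs occupy sw_tx_ring[500..511] and sw_tx_ring[0..9],
	 * so the two rte_pktmbuf_free_bulk() calls release 12 and 10 mbufs
	 * respectively.
	 */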
940 if (first_idx > idx) {
941 rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
942 mask - first_idx + 1);
943 rte_pktmbuf_free_bulk(&txq->sw_tx_ring[0], idx);
945 rte_pktmbuf_free_bulk(&txq->sw_tx_ring[first_idx],
950 static int qede_drain_txq(struct qede_dev *qdev,
951 struct qede_tx_queue *txq, bool allow_drain)
953 struct ecore_dev *edev = &qdev->edev;
956 while (txq->sw_tx_cons != txq->sw_tx_prod) {
957 qede_process_tx_compl(edev, txq);
960 			DP_ERR(edev, "Tx queue[%u] is stuck, "
961 "requesting MCP to drain\n",
963 rc = qdev->ops->common->drain(edev);
966 return qede_drain_txq(qdev, txq, false);
968 				DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
969 "PROD=%d, CONS=%d\n",
970 txq->queue_id, txq->sw_tx_prod,
976 rte_compiler_barrier();
979 /* FW finished processing, wait for HW to transmit all tx packets */
985 /* Stops a given TX queue in the HW */
986 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
988 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
989 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
990 struct ecore_hwfn *p_hwfn;
991 struct qede_tx_queue *txq;
995 if (tx_queue_id < qdev->num_tx_queues) {
996 txq = qdev->fp_array[tx_queue_id].txq;
998 if (qede_drain_txq(qdev, txq, true))
999 return -1; /* For the lack of retcodes */
1001 hwfn_index = tx_queue_id % edev->num_hwfns;
1002 p_hwfn = &edev->hwfns[hwfn_index];
1003 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
1004 if (rc != ECORE_SUCCESS) {
1005 			DP_ERR(edev, "TX queue %u stop failed\n", tx_queue_id);
1008 qede_tx_queue_release_mbufs(txq);
1009 qede_tx_queue_reset(qdev, txq);
1010 eth_dev->data->tx_queue_state[tx_queue_id] =
1011 RTE_ETH_QUEUE_STATE_STOPPED;
1012 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
1014 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
1021 int qede_start_queues(struct rte_eth_dev *eth_dev)
1023 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1027 for (id = 0; id < qdev->num_rx_queues; id++) {
1028 rc = qede_rx_queue_start(eth_dev, id);
1029 if (rc != ECORE_SUCCESS)
1033 for (id = 0; id < qdev->num_tx_queues; id++) {
1034 rc = qede_tx_queue_start(eth_dev, id);
1035 if (rc != ECORE_SUCCESS)
1042 void qede_stop_queues(struct rte_eth_dev *eth_dev)
1044 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1047 /* Stopping RX/TX queues */
1048 for (id = 0; id < qdev->num_tx_queues; id++)
1049 qede_tx_queue_stop(eth_dev, id);
1051 for (id = 0; id < qdev->num_rx_queues; id++)
1052 qede_rx_queue_stop(eth_dev, id);
1055 static inline bool qede_tunn_exist(uint16_t flag)
1057 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
1058 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
1061 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
1063 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
1064 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
1068 * qede_check_tunn_csum_l4:
1070 * 1 : If L4 csum is enabled AND if the validation has failed.
1073 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
1075 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
1076 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
1077 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
1078 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
1083 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
1085 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1086 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
1087 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1088 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
1093 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
1094 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
1096 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1097 struct rte_ether_hdr *eth_hdr;
1098 struct rte_ipv4_hdr *ipv4_hdr;
1099 struct rte_ipv6_hdr *ipv6_hdr;
1100 struct rte_vlan_hdr *vlan_hdr;
1102 bool vlan_tagged = 0;
1105 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1106 len = sizeof(struct rte_ether_hdr);
1107 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
1109 /* Note: Valid only if VLAN stripping is disabled */
1110 if (ethertype == RTE_ETHER_TYPE_VLAN) {
1112 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
1113 len += sizeof(struct rte_vlan_hdr);
1114 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
1117 if (ethertype == RTE_ETHER_TYPE_IPV4) {
1118 packet_type |= RTE_PTYPE_L3_IPV4;
1119 ipv4_hdr = rte_pktmbuf_mtod_offset(m,
1120 struct rte_ipv4_hdr *, len);
1121 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
1122 packet_type |= RTE_PTYPE_L4_TCP;
1123 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
1124 packet_type |= RTE_PTYPE_L4_UDP;
1125 } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
1126 packet_type |= RTE_PTYPE_L3_IPV6;
1127 ipv6_hdr = rte_pktmbuf_mtod_offset(m,
1128 struct rte_ipv6_hdr *, len);
1129 if (ipv6_hdr->proto == IPPROTO_TCP)
1130 packet_type |= RTE_PTYPE_L4_TCP;
1131 else if (ipv6_hdr->proto == IPPROTO_UDP)
1132 packet_type |= RTE_PTYPE_L4_UDP;
1136 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1138 packet_type |= RTE_PTYPE_L2_ETHER;
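/*
 * Illustrative outcome (assumed packet, not from the sources): for a VXLAN
 * packet whose outer headers are Ethernet/IPv4/UDP with no VLAN tag,
 * qede_rx_cqe_to_pkt_type_outer() returns
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP.
 */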
1143 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
1148 static const uint32_t
1149 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1150 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
1151 RTE_PTYPE_INNER_L2_ETHER,
1152 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
1153 RTE_PTYPE_INNER_L2_ETHER,
1154 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
1155 RTE_PTYPE_INNER_L4_TCP |
1156 RTE_PTYPE_INNER_L2_ETHER,
1157 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
1158 RTE_PTYPE_INNER_L4_TCP |
1159 RTE_PTYPE_INNER_L2_ETHER,
1160 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
1161 RTE_PTYPE_INNER_L4_UDP |
1162 RTE_PTYPE_INNER_L2_ETHER,
1163 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
1164 RTE_PTYPE_INNER_L4_UDP |
1165 RTE_PTYPE_INNER_L2_ETHER,
1166 /* Frags with no VLAN */
1167 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1168 RTE_PTYPE_INNER_L4_FRAG |
1169 RTE_PTYPE_INNER_L2_ETHER,
1170 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1171 RTE_PTYPE_INNER_L4_FRAG |
1172 RTE_PTYPE_INNER_L2_ETHER,
1174 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1175 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1176 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1177 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1178 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1179 RTE_PTYPE_INNER_L4_TCP |
1180 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1181 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1182 RTE_PTYPE_INNER_L4_TCP |
1183 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1184 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1185 RTE_PTYPE_INNER_L4_UDP |
1186 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1187 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1188 RTE_PTYPE_INNER_L4_UDP |
1189 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1190 /* Frags with VLAN */
1191 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1192 RTE_PTYPE_INNER_L4_FRAG |
1193 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1194 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1195 RTE_PTYPE_INNER_L4_FRAG |
1196 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1199 	/* Bits (0..3) provide the L3/L4 protocol type */
1200 	/* Bits (4,5) provide frag and VLAN info */
1201 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1202 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1203 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1204 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1205 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1206 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1207 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1208 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1210 if (val < QEDE_PKT_TYPE_MAX)
1211 return ptype_lkup_tbl[val];
1213 return RTE_PTYPE_UNKNOWN;
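/*
 * Illustrative index construction for the lookup tables above and below
 * (conceptual only; the exact bit positions come from the
 * PARSING_AND_ERR_FLAGS_* macros): an inner IPv4/TCP frame carrying an
 * 802.1Q tag sets the L3TYPE, L4PROTOCOL and TAG8021QEXIST fields, so the
 * masked value indexes the QEDE_PKT_TYPE_IPV4_TCP_VLAN entry of the table.
 */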
1216 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1221 static const uint32_t
1222 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1223 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1224 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1225 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
1228 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
1231 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
1234 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1237 /* Frags with no VLAN */
1238 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1241 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1245 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1246 RTE_PTYPE_L2_ETHER_VLAN,
1247 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1248 RTE_PTYPE_L2_ETHER_VLAN,
1249 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1251 RTE_PTYPE_L2_ETHER_VLAN,
1252 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1254 RTE_PTYPE_L2_ETHER_VLAN,
1255 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1257 RTE_PTYPE_L2_ETHER_VLAN,
1258 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1260 RTE_PTYPE_L2_ETHER_VLAN,
1261 /* Frags with VLAN */
1262 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1264 RTE_PTYPE_L2_ETHER_VLAN,
1265 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1267 RTE_PTYPE_L2_ETHER_VLAN,
1270 	/* Bits (0..3) provide the L3/L4 protocol type */
1271 	/* Bits (4,5) provide frag and VLAN info */
1272 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1273 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1274 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1275 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1276 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1277 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1278 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1279 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1281 if (val < QEDE_PKT_TYPE_MAX)
1282 return ptype_lkup_tbl[val];
1284 return RTE_PTYPE_UNKNOWN;
1287 static inline uint8_t
1288 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1290 struct rte_ipv4_hdr *ip;
1295 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1296 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1298 if (unlikely(val)) {
1299 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1300 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1301 ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
1302 sizeof(struct rte_ether_hdr));
1303 pkt_csum = ip->hdr_checksum;
1304 ip->hdr_checksum = 0;
1305 calc_csum = rte_ipv4_cksum(ip);
1306 ip->hdr_checksum = pkt_csum;
1307 return (calc_csum != pkt_csum);
1308 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1315 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1317 ecore_chain_consume(&rxq->rx_bd_ring);
1322 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1323 struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
1325 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1326 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
1327 dma_addr_t new_mapping;
1329 rxq->sw_rx_ring[idx] = curr_cons;
1331 new_mapping = rte_mbuf_data_iova_default(curr_cons);
1333 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1334 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1340 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1341 struct qede_dev *qdev, uint8_t count)
1343 struct rte_mbuf *curr_cons;
1345 for (; count > 0; count--) {
1346 curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1347 qede_reuse_page(qdev, rxq, curr_cons);
1348 qede_rx_bd_ring_consume(rxq);
1353 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1354 struct qede_rx_queue *rxq,
1355 uint8_t agg_index, uint16_t len)
1357 struct qede_agg_info *tpa_info;
1358 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1361 	/* Under certain conditions the FW may not consume an additional or
1362 	 * new BD, so the decision to consume the BD must be made based on
1363 	 * len_list[0].
1365 if (rte_le_to_cpu_16(len)) {
1366 tpa_info = &rxq->tpa_info[agg_index];
1367 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1368 curr_frag = rxq->sw_rx_ring[cons_idx];
1370 curr_frag->nb_segs = 1;
1371 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1372 curr_frag->data_len = curr_frag->pkt_len;
1373 tpa_info->tpa_tail->next = curr_frag;
1374 tpa_info->tpa_tail = curr_frag;
1375 qede_rx_bd_ring_consume(rxq);
1376 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1377 PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
1378 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1379 rxq->rx_alloc_errors++;
1385 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1386 struct qede_rx_queue *rxq,
1387 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1389 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1390 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1391 /* only len_list[0] will have value */
1392 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1397 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1398 struct qede_rx_queue *rxq,
1399 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1401 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1403 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1405 /* Update total length and frags based on end TPA */
1406 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1407 /* TODO: Add Sanity Checks */
1408 rx_mb->nb_segs = cqe->num_of_bds;
1409 rx_mb->pkt_len = cqe->total_packet_len;
1411 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1412 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1413 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1417 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1422 static const uint32_t
1423 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1424 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1425 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1426 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1427 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1428 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1429 RTE_PTYPE_TUNNEL_GENEVE,
1430 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1431 RTE_PTYPE_TUNNEL_GRE,
1432 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1433 RTE_PTYPE_TUNNEL_VXLAN,
1434 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1435 RTE_PTYPE_TUNNEL_GENEVE,
1436 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1437 RTE_PTYPE_TUNNEL_GRE,
1438 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1439 RTE_PTYPE_TUNNEL_VXLAN,
1440 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1441 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1442 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1443 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1444 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1445 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1446 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1447 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1448 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1449 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1450 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1451 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1452 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1453 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1454 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1455 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1456 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1457 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1458 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1459 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1460 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1461 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1462 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1463 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1466 /* Cover bits[4-0] to include tunn_type and next protocol */
1467 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1468 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1469 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1470 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1472 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1473 return ptype_tunn_lkup_tbl[val];
1475 return RTE_PTYPE_UNKNOWN;
1479 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1480 uint8_t num_segs, uint16_t pkt_len)
1482 struct qede_rx_queue *rxq = p_rxq;
1483 struct qede_dev *qdev = rxq->qdev;
1484 register struct rte_mbuf *seg1 = NULL;
1485 register struct rte_mbuf *seg2 = NULL;
1486 uint16_t sw_rx_index;
1491 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1493 if (unlikely(!cur_size)) {
1494 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1495 " left for mapping jumbo\n", num_segs);
1496 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1499 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1500 seg2 = rxq->sw_rx_ring[sw_rx_index];
1501 qede_rx_bd_ring_consume(rxq);
1502 pkt_len -= cur_size;
1503 seg2->data_len = cur_size;
1513 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1515 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1518 PMD_RX_LOG(INFO, rxq,
1519 "len 0x%04x bf 0x%04x hash_val 0x%x"
1520 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1521 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1522 m->data_len, bitfield, m->hash.rss,
1523 (unsigned long)m->ol_flags,
1524 rte_get_ptype_l2_name(m->packet_type),
1525 rte_get_ptype_l3_name(m->packet_type),
1526 rte_get_ptype_l4_name(m->packet_type),
1527 rte_get_ptype_tunnel_name(m->packet_type),
1528 rte_get_ptype_inner_l2_name(m->packet_type),
1529 rte_get_ptype_inner_l3_name(m->packet_type),
1530 rte_get_ptype_inner_l4_name(m->packet_type));
1535 qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1537 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1538 register struct rte_mbuf *rx_mb = NULL;
1539 struct qede_rx_queue *rxq = p_rxq;
1540 struct qede_dev *qdev = rxq->qdev;
1541 struct ecore_dev *edev = &qdev->edev;
1542 union eth_rx_cqe *cqe;
1544 enum eth_rx_cqe_type cqe_type;
1545 int rss_enable = qdev->rss_enable;
1546 int rx_alloc_count = 0;
1547 uint32_t packet_type;
1549 uint16_t vlan_tci, port_id;
1550 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
1551 uint16_t rx_pkt = 0;
1552 uint16_t pkt_len = 0;
1553 uint16_t len; /* Length of first BD */
1554 uint16_t preload_idx;
1555 uint16_t parse_flag;
1556 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1557 uint8_t bitfield_val;
1559 uint8_t offset, flags, bd_num;
1562 	/* Replenish the buffers consumed in the previous call */
1563 if (rxq->rx_alloc_count) {
1564 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1565 rxq->rx_alloc_count))) {
1566 struct rte_eth_dev *dev;
1568 PMD_RX_LOG(ERR, rxq,
1569 				   "New buffer allocation failed, "
1570 				   "dropping incoming packet\n");
1571 dev = &rte_eth_devices[rxq->port_id];
1572 dev->data->rx_mbuf_alloc_failed +=
1573 rxq->rx_alloc_count;
1574 rxq->rx_alloc_errors += rxq->rx_alloc_count;
1577 qede_update_rx_prod(qdev, rxq);
1578 rxq->rx_alloc_count = 0;
1581 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1582 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1586 if (hw_comp_cons == sw_comp_cons)
1589 num_rx_bds = NUM_RX_BDS(rxq);
1590 port_id = rxq->port_id;
1592 while (sw_comp_cons != hw_comp_cons) {
1594 packet_type = RTE_PTYPE_UNKNOWN;
1598 /* Get the CQE from the completion ring */
1600 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1601 cqe_type = cqe->fast_path_regular.type;
1602 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1604 if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
1605 fp_cqe = &cqe->fast_path_regular;
1607 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
1608 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1609 ecore_eth_cqe_completion
1610 (&edev->hwfns[rxq->queue_id %
1612 (struct eth_slow_path_rx_cqe *)cqe);
1617 /* Get the data from the SW ring */
1618 sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
1619 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1620 assert(rx_mb != NULL);
1622 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1623 offset = fp_cqe->placement_offset;
1624 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1625 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1626 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1627 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1628 bd_num = fp_cqe->bd_num;
1629 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1630 bitfield_val = fp_cqe->bitfields;
1633 if (unlikely(qede_tunn_exist(parse_flag))) {
1634 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1635 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1636 PMD_RX_LOG(ERR, rxq,
1637 "L4 csum failed, flags = 0x%x\n",
1639 rxq->rx_hw_errors++;
1640 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1642 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1645 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1646 PMD_RX_LOG(ERR, rxq,
1647 "Outer L3 csum failed, flags = 0x%x\n",
1649 rxq->rx_hw_errors++;
1650 ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1652 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1655 flags = fp_cqe->tunnel_pars_flags.flags;
1659 qede_rx_cqe_to_tunn_pkt_type(flags);
1663 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1665 			/* Outer L3/L4 types are not available in the CQE */
1666 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1668 			/* Outer L3/L4 types are not available in the CQE.
1669 			 * Need to add the placement offset to parse them correctly.
1671 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1672 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1674 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1677 /* Common handling for non-tunnel packets and for inner
1678 * headers in the case of tunnel.
1680 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1681 PMD_RX_LOG(ERR, rxq,
1682 "L4 csum failed, flags = 0x%x\n",
1684 rxq->rx_hw_errors++;
1685 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1687 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1689 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1690 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1692 rxq->rx_hw_errors++;
1693 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1695 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1698 if (unlikely(CQE_HAS_VLAN(parse_flag) ||
1699 CQE_HAS_OUTER_VLAN(parse_flag))) {
1700 /* Note: FW doesn't indicate Q-in-Q packet */
1701 ol_flags |= RTE_MBUF_F_RX_VLAN;
1702 if (qdev->vlan_strip_flg) {
1703 ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
1704 rx_mb->vlan_tci = vlan_tci;
1709 ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1710 rx_mb->hash.rss = rss_hash;
1714 qede_rx_bd_ring_consume(rxq);
1716 /* Prefetch next mbuf while processing current one. */
1717 preload_idx = rxq->sw_rx_cons & num_rx_bds;
1718 rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
1720 /* Update rest of the MBUF fields */
1721 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1722 rx_mb->port = port_id;
1723 rx_mb->ol_flags = ol_flags;
1724 rx_mb->data_len = len;
1725 rx_mb->packet_type = packet_type;
1726 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1727 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1729 rx_mb->nb_segs = bd_num;
1730 rx_mb->pkt_len = pkt_len;
1732 rx_pkts[rx_pkt] = rx_mb;
1736 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1737 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1738 if (rx_pkt == nb_pkts) {
1739 PMD_RX_LOG(DEBUG, rxq,
1740 "Budget reached nb_pkts=%u received=%u",
1746 /* Request number of buffers to be allocated in next loop */
1747 rxq->rx_alloc_count = rx_alloc_count;
1749 rxq->rcv_pkts += rx_pkt;
1750 rxq->rx_segs += rx_pkt;
1751 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
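/*
 * Design note (summarizing the code above, not new behaviour): buffer
 * replenishment is deferred - each receive call records how many BDs it
 * consumed in rxq->rx_alloc_count, and the next call bulk-allocates that
 * many mbufs and updates the producers before polling new CQEs.
 */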
1757 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1759 struct qede_rx_queue *rxq = p_rxq;
1760 struct qede_dev *qdev = rxq->qdev;
1761 struct ecore_dev *edev = &qdev->edev;
1762 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1763 uint16_t rx_pkt = 0;
1764 union eth_rx_cqe *cqe;
1765 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1766 register struct rte_mbuf *rx_mb = NULL;
1767 register struct rte_mbuf *seg1 = NULL;
1768 enum eth_rx_cqe_type cqe_type;
1769 uint16_t pkt_len = 0; /* Sum of all BD segments */
1770 uint16_t len; /* Length of first BD */
1771 uint8_t num_segs = 1;
1772 uint16_t preload_idx;
1773 uint16_t parse_flag;
1774 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1775 uint8_t bitfield_val;
1777 uint8_t tunn_parse_flag;
1778 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1780 uint32_t packet_type;
1783 uint8_t offset, tpa_agg_idx, flags;
1784 struct qede_agg_info *tpa_info = NULL;
1786 int rx_alloc_count = 0;
1789 	/* Replenish the buffers consumed in the previous call */
1790 if (rxq->rx_alloc_count) {
1791 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1792 rxq->rx_alloc_count))) {
1793 struct rte_eth_dev *dev;
1795 PMD_RX_LOG(ERR, rxq,
1796 				   "New buffer allocation failed, "
1797 				   "dropping incoming packet\n");
1798 dev = &rte_eth_devices[rxq->port_id];
1799 dev->data->rx_mbuf_alloc_failed +=
1800 rxq->rx_alloc_count;
1801 rxq->rx_alloc_errors += rxq->rx_alloc_count;
1804 qede_update_rx_prod(qdev, rxq);
1805 rxq->rx_alloc_count = 0;
1808 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1809 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1813 if (hw_comp_cons == sw_comp_cons)
1816 while (sw_comp_cons != hw_comp_cons) {
1818 packet_type = RTE_PTYPE_UNKNOWN;
1820 tpa_start_flg = false;
1823 /* Get the CQE from the completion ring */
1825 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1826 cqe_type = cqe->fast_path_regular.type;
1827 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1830 case ETH_RX_CQE_TYPE_REGULAR:
1831 fp_cqe = &cqe->fast_path_regular;
1833 case ETH_RX_CQE_TYPE_TPA_START:
1834 cqe_start_tpa = &cqe->fast_path_tpa_start;
1835 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1836 tpa_start_flg = true;
1837 /* Mark it as LRO packet */
1838 ol_flags |= RTE_MBUF_F_RX_LRO;
1839 			/* In split mode, seg_len is the same as len_on_first_bd
1840 			 * and bw_ext_bd_len_list will be empty since there are
1841 			 * no additional buffers.
1843 PMD_RX_LOG(INFO, rxq,
1844 "TPA start[%d] - len_on_first_bd %d header %d"
1845 " [bd_list[0] %d], [seg_len %d]\n",
1846 cqe_start_tpa->tpa_agg_index,
1847 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1848 cqe_start_tpa->header_len,
1849 rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
1850 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1853 case ETH_RX_CQE_TYPE_TPA_CONT:
1854 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1855 &cqe->fast_path_tpa_cont);
1857 case ETH_RX_CQE_TYPE_TPA_END:
1858 qede_rx_process_tpa_end_cqe(qdev, rxq,
1859 &cqe->fast_path_tpa_end);
1860 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1861 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1862 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1864 case ETH_RX_CQE_TYPE_SLOW_PATH:
1865 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1866 ecore_eth_cqe_completion(
1867 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1868 (struct eth_slow_path_rx_cqe *)cqe);
1874 /* Get the data from the SW ring */
1875 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1876 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1877 assert(rx_mb != NULL);
1879 /* Handle regular CQE or TPA start CQE */
1880 if (!tpa_start_flg) {
1881 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1882 offset = fp_cqe->placement_offset;
1883 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1884 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1885 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1886 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1887 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1888 bitfield_val = fp_cqe->bitfields;
1892 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1893 offset = cqe_start_tpa->placement_offset;
1894 /* seg_len = len_on_first_bd */
1895 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1896 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1897 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1898 bitfield_val = cqe_start_tpa->bitfields;
1900 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1902 if (qede_tunn_exist(parse_flag)) {
1903 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1904 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1905 PMD_RX_LOG(ERR, rxq,
1906 "L4 csum failed, flags = 0x%x\n",
1908 rxq->rx_hw_errors++;
1909 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1911 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1914 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1915 PMD_RX_LOG(ERR, rxq,
1916 "Outer L3 csum failed, flags = 0x%x\n",
1918 rxq->rx_hw_errors++;
1919 ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1921 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1925 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1927 flags = fp_cqe->tunnel_pars_flags.flags;
1928 tunn_parse_flag = flags;
1932 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1936 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1938 				/* Outer L3/L4 types are not available in the CQE */
1939 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1941 			/* Outer L3/L4 types are not available in the CQE.
1942 			 * Need to add the placement offset to parse them correctly.
1944 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1945 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1947 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1950 /* Common handling for non-tunnel packets and for inner
1951 * headers in the case of tunnel.
1953 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1954 PMD_RX_LOG(ERR, rxq,
1955 "L4 csum failed, flags = 0x%x\n",
1957 rxq->rx_hw_errors++;
1958 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1960 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1962 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1963 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1965 rxq->rx_hw_errors++;
1966 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1968 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1971 if (CQE_HAS_VLAN(parse_flag) ||
1972 CQE_HAS_OUTER_VLAN(parse_flag)) {
1973 /* Note: FW doesn't indicate Q-in-Q packet */
1974 ol_flags |= RTE_MBUF_F_RX_VLAN;
1975 if (qdev->vlan_strip_flg) {
1976 ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
1977 rx_mb->vlan_tci = vlan_tci;
1982 if (qdev->rss_enable) {
1983 ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1984 rx_mb->hash.rss = rss_hash;
1988 qede_rx_bd_ring_consume(rxq);
1990 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1991 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1992 " len on first: %04x Total Len: %04x",
1993 fp_cqe->bd_num, len, pkt_len);
1994 num_segs = fp_cqe->bd_num - 1;
1996 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
2000 rx_alloc_count += num_segs;
2001 rxq->rx_segs += num_segs;
2003 rxq->rx_segs++; /* for the first segment */
2005 /* Prefetch next mbuf while processing current one. */
2006 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
2007 rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
2009 /* Update rest of the MBUF fields */
2010 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
2011 rx_mb->port = rxq->port_id;
2012 rx_mb->ol_flags = ol_flags;
2013 rx_mb->data_len = len;
2014 rx_mb->packet_type = packet_type;
2015 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
2016 print_rx_bd_info(rx_mb, rxq, bitfield_val);
2018 if (!tpa_start_flg) {
2019 rx_mb->nb_segs = fp_cqe->bd_num;
2020 rx_mb->pkt_len = pkt_len;
2022 /* store ref to the updated mbuf */
2023 tpa_info->tpa_head = rx_mb;
2024 tpa_info->tpa_tail = tpa_info->tpa_head;
2026 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
2028 if (!tpa_start_flg) {
2029 rx_pkts[rx_pkt] = rx_mb;
2033 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
2034 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2035 if (rx_pkt == nb_pkts) {
2036 PMD_RX_LOG(DEBUG, rxq,
2037 "Budget reached nb_pkts=%u received=%u",
2043 /* Request number of buffers to be allocated in next loop */
2044 rxq->rx_alloc_count = rx_alloc_count;
2046 rxq->rcv_pkts += rx_pkt;
2048 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
2054 qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2056 struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2057 uint16_t eng0_pkts, eng1_pkts;
2059 eng0_pkts = nb_pkts / 2;
2061 eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
2063 eng1_pkts = nb_pkts - eng0_pkts;
2065 eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
2068 return eng0_pkts + eng1_pkts;
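/*
 * Illustrative split (numbers are examples only): with nb_pkts = 31, engine
 * 0 (fp0) is polled for 31 / 2 = 15 packets; whatever it actually returns is
 * subtracted from the budget, so if it delivers all 15, engine 1 (fp1) is
 * polled for the remaining 16.
 */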
2071 /* Populate scatter gather buffer descriptor fields */
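/*
 * Layout recap (derived from the loop below): the first mbuf segment is
 * described by the 1st BD prepared by the caller; within this helper the
 * next two segments use the 2nd and 3rd BDs (produced here if not already
 * allocated), and any remaining segments each consume a generic eth_tx_bd.
 * E.g. a 4-segment mbuf chain ends up as BD1 + BD2 + BD3 + one extra BD.
 */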
2072 static inline uint16_t
2073 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
2074 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
2077 struct qede_tx_queue *txq = p_txq;
2078 struct eth_tx_bd *tx_bd = NULL;
2080 uint16_t nb_segs = 0;
2082 /* Check for scattered buffers */
2084 if (start_seg == 0) {
2086 *bd2 = (struct eth_tx_2nd_bd *)
2087 ecore_chain_produce(&txq->tx_pbl);
2088 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
2091 mapping = rte_mbuf_data_iova(m_seg);
2092 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
2093 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
2094 } else if (start_seg == 1) {
2096 *bd3 = (struct eth_tx_3rd_bd *)
2097 ecore_chain_produce(&txq->tx_pbl);
2098 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
2101 mapping = rte_mbuf_data_iova(m_seg);
2102 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
2103 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
2105 tx_bd = (struct eth_tx_bd *)
2106 ecore_chain_produce(&txq->tx_pbl);
2107 memset(tx_bd, 0, sizeof(*tx_bd));
2109 mapping = rte_mbuf_data_iova(m_seg);
2110 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
2111 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
2114 m_seg = m_seg->next;
2117 /* Return total scattered buffers */
2121 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2123 print_tx_bd_info(struct qede_tx_queue *txq,
2124 struct eth_tx_1st_bd *bd1,
2125 struct eth_tx_2nd_bd *bd2,
2126 struct eth_tx_3rd_bd *bd3,
2127 uint64_t tx_ol_flags)
2129 char ol_buf[256] = { 0 }; /* for verbose prints */
2132 PMD_TX_LOG(INFO, txq,
2133 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
2134 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
2135 bd1->data.bd_flags.bitfields,
2136 rte_cpu_to_le_16(bd1->data.bitfields));
2138 PMD_TX_LOG(INFO, txq,
2139 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
2140 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
2141 bd2->data.bitfields2, bd2->data.tunn_ip_size);
2143 PMD_TX_LOG(INFO, txq,
2144 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
2145 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
2146 rte_cpu_to_le_16(bd3->nbytes),
2147 rte_cpu_to_le_16(bd3->data.bitfields),
2148 rte_cpu_to_le_16(bd3->data.lso_mss),
2149 bd3->data.tunn_l4_hdr_start_offset_w,
2150 bd3->data.tunn_hdr_size_w);
2152 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
2153 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
2157 /* TX prepare to check that packets meet TX conditions */
2159 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2160 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
2163 struct qede_tx_queue *txq = p_txq;
2164 #else
2165 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
2172 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2176 for (i = 0; i < nb_pkts; i++) {
2178 ol_flags = m->ol_flags;
2179 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2180 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
2184 /* TBD: confirm it's ~9700B for both? */
2185 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
2190 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
2195 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
2196 /* Only a limited set of tunnel protocols is supported */
2197 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2200 temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
2201 if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
2202 temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
2203 temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
2204 temp == RTE_MBUF_F_TX_TUNNEL_GRE)
2208 rte_errno = ENOTSUP;
2212 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2213 ret = rte_validate_tx_offload(m);
2221 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2222 if (unlikely(i != nb_pkts))
2223 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
2224 i);
2225 #endif
2229 #define MPLSINUDP_HDR_SIZE (12)
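/* 12 bytes = 8-byte UDP header plus one 4-byte MPLS label, matching the
 * single-label limitation noted in the tunnel Tx path below.
 */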
2231 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
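/* The offsets and sizes checked below are programmed into BD2 in units of
 * 2-byte words, hence the divisions by two and the field-mask bounds.
 */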
2233 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
2234 struct qede_tx_queue *txq)
2236 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
2237 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
2238 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
2239 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
2240 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
2241 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
2242 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
2243 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
2244 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
2245 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
2246 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
2251 qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2253 struct qede_tx_queue *txq = p_txq;
2254 struct qede_dev *qdev = txq->qdev;
2255 struct ecore_dev *edev = &qdev->edev;
2256 struct eth_tx_1st_bd *bd1;
2257 struct eth_tx_2nd_bd *bd2;
2258 struct eth_tx_3rd_bd *bd3;
2259 struct rte_mbuf *m_seg = NULL;
2260 struct rte_mbuf *mbuf;
2261 struct rte_mbuf **sw_tx_ring;
2262 uint16_t nb_tx_pkts;
2265 uint16_t nb_frags = 0;
2266 uint16_t nb_pkt_sent = 0;
2268 uint64_t tx_ol_flags;
2271 uint8_t bd1_bd_flags_bf;
2273 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2274 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2275 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2276 qede_process_tx_compl(edev, txq);
2279 nb_tx_pkts = nb_pkts;
2280 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2281 sw_tx_ring = txq->sw_tx_ring;
2283 while (nb_tx_pkts--) {
2284 /* Init flags/values */
2290 bd1_bd_flags_bf = 0;
2297 /* Check that enough TX BDs are available for this packet's segments */
2298 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2301 tx_ol_flags = mbuf->ol_flags;
2302 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2304 if (unlikely(txq->nb_tx_avail <
2305 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2308 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2309 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2311 /* Offload the IP checksum in the hardware */
2312 if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
2314 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2316 /* L4 checksum offload (tcp or udp) */
2317 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2318 (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
2320 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2322 /* Fill the entry in the SW ring and the BDs in the FW ring */
2324 sw_tx_ring[idx] = mbuf;
2327 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2328 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2331 /* Map MBUF linear data for DMA and set in the BD1 */
2332 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2334 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2335 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2337 /* Handle fragmented MBUF */
2338 if (unlikely(mbuf->nb_segs > 1)) {
2339 m_seg = mbuf->next;
2341 /* Encode scatter gather buffer descriptors */
2342 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
2346 bd1->data.nbds = nbds + nb_frags;
2348 txq->nb_tx_avail -= bd1->data.nbds;
2351 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2352 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2353 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2354 #endif
2359 /* Write value of prod idx into bd_prod */
2360 txq->tx_db.data.bd_prod = bd_prod;
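/* Ensure all BD and producer-index stores are ordered before the doorbell
 * write below, which hands the new producer index to hardware.
 */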
2362 rte_compiler_barrier();
2363 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2366 /* Check again for Tx completions */
2367 qede_process_tx_compl(edev, txq);
2369 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2370 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
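/* Full-featured Tx burst handler: in addition to the plain IP/L4 checksum
 * offloads handled by qede_xmit_pkts_regular() above, this path also
 * programs VLAN insertion, LSO and tunnel (e.g. MPLS-over-UDP) offloads
 * into the additional BDs.
 */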
2376 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2378 struct qede_tx_queue *txq = p_txq;
2379 struct qede_dev *qdev = txq->qdev;
2380 struct ecore_dev *edev = &qdev->edev;
2381 struct rte_mbuf *mbuf;
2382 struct rte_mbuf *m_seg = NULL;
2383 uint16_t nb_tx_pkts;
2387 uint16_t nb_pkt_sent = 0;
2391 __rte_unused bool tunn_flg;
2392 bool tunn_ipv6_ext_flg;
2393 struct eth_tx_1st_bd *bd1;
2394 struct eth_tx_2nd_bd *bd2;
2395 struct eth_tx_3rd_bd *bd3;
2396 uint64_t tx_ol_flags;
2400 uint8_t bd1_bd_flags_bf;
2409 uint8_t tunn_l4_hdr_start_offset;
2410 uint8_t tunn_hdr_size;
2411 uint8_t inner_l2_hdr_size;
2412 uint16_t inner_l4_hdr_offset;
2414 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2415 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2416 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2417 qede_process_tx_compl(edev, txq);
2420 nb_tx_pkts = nb_pkts;
2421 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2422 while (nb_tx_pkts--) {
2423 /* Init flags/values */
2433 bd1_bd_flags_bf = 0;
2438 mplsoudp_flg = false;
2439 tunn_ipv6_ext_flg = false;
2441 tunn_l4_hdr_start_offset = 0;
2446 /* Check that enough TX BDs are available for this packet's segments */
2447 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2450 tx_ol_flags = mbuf->ol_flags;
2451 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2453 /* TX prepare would have already checked supported tunnel Tx
2454 * offloads. Don't rely on pkt_type marked by Rx, instead use
2455 * tx_ol_flags to decide.
2457 tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
2460 /* Check against max which is Tunnel IPv6 + ext */
2461 if (unlikely(txq->nb_tx_avail <
2462 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
2465 /* First indicate it's a tunnel pkt */
2466 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
2467 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2468 /* Legacy FW had flipped behavior with regard to this bit,
2469 * i.e. the bit needed to be set to prevent FW from touching
2470 * encapsulated packets when it didn't need to.
2471 */
2472 if (unlikely(txq->is_legacy)) {
2474 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2477 /* Outer IP checksum offload */
2478 if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2479 RTE_MBUF_F_TX_OUTER_IPV4)) {
2481 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
2482 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
2486 * Currently, only inner checksum offload in an MPLS-in-UDP
2487 * tunnel with one MPLS label is supported. Both outer
2488 * and inner layer lengths need to be provided in
2489 * the mbuf.
2490 */
2491 if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
2492 RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
2493 mplsoudp_flg = true;
2494 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2495 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
2496 #endif
2497 /* Outer L4 offset in two byte words */
2498 tunn_l4_hdr_start_offset =
2499 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
2500 /* Tunnel header size in two byte words */
2501 tunn_hdr_size = (mbuf->outer_l2_len +
2502 mbuf->outer_l3_len +
2503 MPLSINUDP_HDR_SIZE) / 2;
2504 /* Inner L2 header size in two byte words */
2505 inner_l2_hdr_size = (mbuf->l2_len -
2506 MPLSINUDP_HDR_SIZE) / 2;
2507 /* Inner L4 header offset from the beginning
2508 * of inner packet in two byte words
2510 inner_l4_hdr_offset = (mbuf->l2_len -
2511 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
2513 /* Inner L2 size and address type */
2514 bd2_bf1 |= (inner_l2_hdr_size &
2515 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
2516 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
2517 bd2_bf1 |= (UNICAST_ADDRESS &
2518 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
2519 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
2520 /* Treated as IPv6+Ext */
2522 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
2524 /* Mark inner IPv6 if present */
2525 if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
2527 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
2529 /* Inner L4 offsets */
2530 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2531 (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
2532 RTE_MBUF_F_TX_TCP_CKSUM))) {
2533 /* Determines if BD3 is needed */
2534 tunn_ipv6_ext_flg = true;
2535 if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
2536 RTE_MBUF_F_TX_UDP_CKSUM) {
2538 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
2541 /* TODO: other pseudo checksum modes are not supported */
2545 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
2546 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
2547 bd2_bf2 |= (inner_l4_hdr_offset &
2548 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
2549 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
2551 } /* End MPLSoUDP */
2552 } /* End Tunnel handling */
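/* LSO (TCP segmentation offload) handling */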
2554 if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2556 if (unlikely(txq->nb_tx_avail <
2557 ETH_TX_MIN_BDS_PER_LSO_PKT))
2559 /* For LSO, the packet header and payload must reside in
2560 * buffers pointed to by different BDs. BD1 is used for the
2561 * header and BD2 onwards for the data.
2562 */
2563 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2565 hdr_size += mbuf->outer_l2_len +
2568 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2570 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2571 /* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
2573 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2574 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2575 /* Using one header BD */
2576 bd3_bf |= rte_cpu_to_le_16(1 <<
2577 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2579 if (unlikely(txq->nb_tx_avail <
2580 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2583 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2584 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2587 /* Descriptor based VLAN insertion */
2588 if (tx_ol_flags & RTE_MBUF_F_TX_VLAN) {
2589 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2591 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2594 /* Offload the IP checksum in the hardware */
2595 if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2597 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2598 /* There's no DPDK flag to request outer-L4 csum
2599 * offload. But for tunnels, if inner L3 or L4 csum
2600 * offload is requested, we must also force
2601 * recalculation of the tunnel's outer L4 header csum.
2602 */
2603 if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
2604 RTE_MBUF_F_TX_TUNNEL_GRE)) {
2606 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2607 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2611 /* L4 checksum offload (tcp or udp) */
2612 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2613 (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
2615 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2616 /* As above: with no DPDK flag for outer-L4 csum offload,
2617 * force recalculation of the tunnel's outer L4 header csum
2618 * whenever inner L3 or L4 csum offload is requested.
2619 */
2623 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2624 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2628 /* Fill the entry in the SW ring and the BDs in the FW ring */
2630 txq->sw_tx_ring[idx] = mbuf;
2633 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2634 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2637 /* Map MBUF linear data for DMA and set in the BD1 */
2638 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2639 mbuf->data_len);
2640 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2641 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2642 bd1->data.vlan = vlan;
2644 if (lso_flg || mplsoudp_flg) {
2645 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2647 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2651 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2652 hdr_size);
2654 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2655 rte_mbuf_data_iova(mbuf)),
2656 mbuf->data_len - hdr_size);
2657 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2659 bd2->data.bitfields2 =
2660 rte_cpu_to_le_16(bd2_bf2);
2662 bd2->data.tunn_ip_size =
2663 rte_cpu_to_le_16(mbuf->outer_l3_len);
2666 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2667 bd3 = (struct eth_tx_3rd_bd *)
2668 ecore_chain_produce(&txq->tx_pbl);
2669 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2671 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2673 bd3->data.lso_mss = mss;
2675 bd3->data.tunn_l4_hdr_start_offset_w =
2676 tunn_l4_hdr_start_offset;
2677 bd3->data.tunn_hdr_size_w =
2678 tunn_hdr_size;
2683 /* Handle fragmented MBUF */
2684 m_seg = mbuf->next;
2686 /* Encode scatter gather buffer descriptors if required */
2687 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2688 bd1->data.nbds = nbds + nb_frags;
2690 txq->nb_tx_avail -= bd1->data.nbds;
2693 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2694 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2695 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2696 #endif
2701 /* Write value of prod idx into bd_prod */
2702 txq->tx_db.data.bd_prod = bd_prod;
2704 rte_compiler_barrier();
2705 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2708 /* Check again for Tx completions */
2709 qede_process_tx_compl(edev, txq);
2711 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2712 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
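/* Tx burst handler for CMT (dual-engine) devices: the burst is split
 * between the Tx queues of both engines, mirroring qede_recv_pkts_cmt().
 */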
2718 qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2720 struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2721 uint16_t eng0_pkts, eng1_pkts;
2723 eng0_pkts = nb_pkts / 2;
2725 eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
2727 eng1_pkts = nb_pkts - eng0_pkts;
2729 eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
2730 eng1_pkts);
2732 return eng0_pkts + eng1_pkts;
2735 /* This function does a dry-run walk over the completion queue
2736 * to calculate the number of BDs consumed by HW.
2737 * At the end, it restores the completion queue to its original state.
2738 */
2740 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2742 uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2743 union eth_rx_cqe *cqe, *orig_cqe = NULL;
2745 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2746 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2748 if (hw_comp_cons == sw_comp_cons)
2749 return 0;
2751 /* Get the CQE from the completion ring */
2752 cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2753 orig_cqe = cqe;
2755 while (sw_comp_cons != hw_comp_cons) {
2756 switch (cqe->fast_path_regular.type) {
2757 case ETH_RX_CQE_TYPE_REGULAR:
2758 bd_count += cqe->fast_path_regular.bd_num;
2760 case ETH_RX_CQE_TYPE_TPA_END:
2761 bd_count += cqe->fast_path_tpa_end.num_of_bds;
2768 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2769 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2772 /* revert comp_ring to original state */
2773 ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
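/* ethdev rx_descriptor_status callback: offsets already consumed by HW
 * (per the CQE walk above) report DONE, offsets still within the range
 * posted to HW report AVAIL, and anything beyond that reports UNAVAIL.
 */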
2779 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2781 uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2782 uint16_t produced, consumed;
2783 struct qede_rx_queue *rxq = p_rxq;
2785 if (offset > rxq->nb_rx_desc)
2786 return -EINVAL;
2788 sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2789 sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2791 /* find BDs used by HW from completion queue elements */
2792 hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
2794 if (hw_bd_cons < sw_bd_cons)
2795 /* wraparound case */
2796 consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2798 consumed = hw_bd_cons - sw_bd_cons;
2800 if (offset <= consumed)
2801 return RTE_ETH_RX_DESC_DONE;
2803 if (sw_bd_prod < sw_bd_cons)
2804 /* wraparound case */
2805 produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2807 produced = sw_bd_prod - sw_bd_cons;
2809 if (offset <= produced)
2810 return RTE_ETH_RX_DESC_AVAIL;
2812 return RTE_ETH_RX_DESC_UNAVAIL;
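/* Illustrative only, not part of the driver: an application reaches the
 * status callback above through the generic ethdev API, for example to
 * estimate how far hardware has progressed into an Rx ring. Port/queue
 * ids below are arbitrary example values.
 *
 *   int st = rte_eth_rx_descriptor_status(port_id, queue_id, 0);
 *   // RTE_ETH_RX_DESC_DONE here means a completed entry is waiting
 */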