1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
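/* Allocate a single Rx buffer: take an mbuf from the queue's mempool,
 * store it in the software ring at the current producer index and
 * program a BD in the Rx BD chain with the buffer's DMA address.
 */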
10 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
12 struct rte_mbuf *new_mb = NULL;
13 struct eth_rx_bd *rx_bd;
15 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
17 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
18 if (unlikely(!new_mb)) {
20 "Failed to allocate rx buffer "
21 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
22 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
23 rte_mempool_avail_count(rxq->mb_pool),
24 rte_mempool_in_use_count(rxq->mb_pool));
27 rxq->sw_rx_ring[idx] = new_mb;
28 mapping = rte_mbuf_data_iova_default(new_mb);
29 /* Advance PROD and get BD pointer */
30 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
31 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
32 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
37 #define QEDE_MAX_BULK_ALLOC_COUNT 512
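/* Bulk variant of the Rx buffer allocation: fetch 'count' mbufs from the
 * mempool in a single rte_mempool_get_bulk() call (callers cap 'count' at
 * QEDE_MAX_BULK_ALLOC_COUNT), then post them to the software ring and the
 * BD chain, prefetching the next object while the current one is programmed.
 */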
39 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
41 void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
42 struct rte_mbuf *mbuf = NULL;
43 struct eth_rx_bd *rx_bd;
48 idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
50 ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
53 "Failed to allocate %d rx buffers "
54 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
55 count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
56 rte_mempool_avail_count(rxq->mb_pool),
57 rte_mempool_in_use_count(rxq->mb_pool));
61 for (i = 0; i < count; i++) {
63 if (likely(i < count - 1))
64 rte_prefetch0(obj_p[i + 1]);
66 idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
67 rxq->sw_rx_ring[idx] = mbuf;
68 mapping = rte_mbuf_data_iova_default(mbuf);
69 rx_bd = (struct eth_rx_bd *)
70 ecore_chain_produce(&rxq->rx_bd_ring);
71 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
72 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
79 /* Criteria for calculating the Rx buffer size -
80  * 1) rx_buf_size should not exceed the size of the mbuf
81  * 2) In scattered_rx mode - the minimum rx_buf_size should be
82  *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
83  * 3) In regular mode - the minimum rx_buf_size should be
84  *    (MTU + Maximum L2 Header Size + 2)
85  *    In the above cases the +2 corresponds to 2 bytes of padding in front of the L2 header
87  * 4) rx_buf_size should be cache-line aligned. Considering criterion 1,
88  *    we round the size down (floor) instead of up (ceil), so that the
89  *    alignment never pushes rx_buf_size beyond the mbuf size.
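 *
 * Example (illustrative numbers only): in regular mode with a 1500 byte MTU
 * and a 64 byte cache line, the minimum buffer is 1500 + max L2 header + 2,
 * and the final value is rounded down to a multiple of 64 so it never
 * exceeds the mbuf data room. In scattered_rx mode the same sum is divided
 * by ETH_RX_MAX_BUFF_PER_PKT before the rounding.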
92 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
93 uint16_t max_frame_size)
95 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
96 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
99 if (dev->data->scattered_rx) {
100 /* Per HW limitation, at most ETH_RX_MAX_BUFF_PER_PKT
101  * buffers can be used for a single packet, so make sure the
102  * mbuf size is sufficient for this.
104 if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
105 (max_frame_size + QEDE_ETH_OVERHEAD)) {
106 DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
107 mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
111 rx_buf_size = RTE_MAX(mbufsz,
112 (max_frame_size + QEDE_ETH_OVERHEAD) /
113 ETH_RX_MAX_BUFF_PER_PKT);
115 rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
118 /* Align to cache-line size if needed */
119 return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
122 static struct qede_rx_queue *
123 qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
126 unsigned int socket_id,
127 struct rte_mempool *mp,
130 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
131 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
132 struct qede_rx_queue *rxq;
136 /* First allocate the rx queue data structure */
137 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
138 RTE_CACHE_LINE_SIZE, socket_id);
141 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
148 rxq->nb_rx_desc = nb_desc;
149 rxq->queue_id = queue_idx;
150 rxq->port_id = dev->data->port_id;
153 rxq->rx_buf_size = bufsz;
155 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
156 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
158 /* Allocate the parallel driver ring for Rx buffers */
159 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
160 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
161 RTE_CACHE_LINE_SIZE, socket_id);
162 if (!rxq->sw_rx_ring) {
163 DP_ERR(edev, "Memory allocation failed for sw_rx_ring on"
164 " socket %u\n", socket_id);
169 /* Allocate FW Rx ring */
170 rc = qdev->ops->common->chain_alloc(edev,
171 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
172 ECORE_CHAIN_MODE_NEXT_PTR,
173 ECORE_CHAIN_CNT_TYPE_U16,
175 sizeof(struct eth_rx_bd),
179 if (rc != ECORE_SUCCESS) {
180 DP_ERR(edev, "Memory allocation failed for RX BD ring"
181 " on socket %u\n", socket_id);
182 rte_free(rxq->sw_rx_ring);
187 /* Allocate FW completion ring */
188 rc = qdev->ops->common->chain_alloc(edev,
189 ECORE_CHAIN_USE_TO_CONSUME,
190 ECORE_CHAIN_MODE_PBL,
191 ECORE_CHAIN_CNT_TYPE_U16,
193 sizeof(union eth_rx_cqe),
197 if (rc != ECORE_SUCCESS) {
198 DP_ERR(edev, "Memory allocation failed for RX CQE ring"
199 " on socket %u\n", socket_id);
200 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
201 rte_free(rxq->sw_rx_ring);
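/* Rx queue setup entry point: validates that the ring size is a power of
 * two, derives the Rx buffer size from the mempool data room and the max
 * frame length (forcing scatter-gather mode when a frame cannot fit in a
 * single mbuf), and allocates one Rx queue, or two (one per engine) on
 * CMT devices.
 */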
210 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
211 uint16_t nb_desc, unsigned int socket_id,
212 __rte_unused const struct rte_eth_rxconf *rx_conf,
213 struct rte_mempool *mp)
215 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
216 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
217 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
218 struct qede_rx_queue *rxq;
219 uint16_t max_rx_pktlen;
223 PMD_INIT_FUNC_TRACE(edev);
225 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
226 if (!rte_is_power_of_2(nb_desc)) {
227 DP_ERR(edev, "Ring size %u is not a power of 2\n",
232 /* Free memory prior to re-allocation if needed... */
233 if (dev->data->rx_queues[qid] != NULL) {
234 qede_rx_queue_release(dev->data->rx_queues[qid]);
235 dev->data->rx_queues[qid] = NULL;
238 max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
240 /* Fix up RX buffer size */
241 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
242 /* cache align the mbuf size to simplify rx_buf_size calculation */
243 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
244 if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
245 (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
246 if (!dev->data->scattered_rx) {
247 DP_INFO(edev, "Forcing scatter-gather mode\n");
248 dev->data->scattered_rx = 1;
252 rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
258 if (ECORE_IS_CMT(edev)) {
259 rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
260 socket_id, mp, bufsz);
264 qdev->fp_array[qid * 2].rxq = rxq;
265 rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
266 socket_id, mp, bufsz);
270 qdev->fp_array[qid * 2 + 1].rxq = rxq;
271 /* provide per engine fp struct as rx queue */
272 dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
274 rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
275 socket_id, mp, bufsz);
279 dev->data->rx_queues[qid] = rxq;
280 qdev->fp_array[qid].rxq = rxq;
283 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
284 qid, nb_desc, rxq->rx_buf_size, socket_id);
290 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
291 struct qede_rx_queue *rxq)
293 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
294 ecore_chain_reset(&rxq->rx_bd_ring);
295 ecore_chain_reset(&rxq->rx_comp_ring);
298 *rxq->hw_cons_ptr = 0;
301 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
305 if (rxq->sw_rx_ring) {
306 for (i = 0; i < rxq->nb_rx_desc; i++) {
307 if (rxq->sw_rx_ring[i]) {
308 rte_pktmbuf_free(rxq->sw_rx_ring[i]);
309 rxq->sw_rx_ring[i] = NULL;
315 static void _qede_rx_queue_release(struct qede_dev *qdev,
316 struct ecore_dev *edev,
317 struct qede_rx_queue *rxq)
319 qede_rx_queue_release_mbufs(rxq);
320 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
321 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
322 rte_free(rxq->sw_rx_ring);
326 void qede_rx_queue_release(void *rx_queue)
328 struct qede_rx_queue *rxq = rx_queue;
329 struct qede_fastpath_cmt *fp_cmt;
330 struct qede_dev *qdev;
331 struct ecore_dev *edev;
335 edev = QEDE_INIT_EDEV(qdev);
336 PMD_INIT_FUNC_TRACE(edev);
337 if (ECORE_IS_CMT(edev)) {
339 _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
340 _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
342 _qede_rx_queue_release(qdev, edev, rxq);
347 /* Stops a given RX queue in the HW */
348 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
350 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
351 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
352 struct ecore_hwfn *p_hwfn;
353 struct qede_rx_queue *rxq;
357 if (rx_queue_id < qdev->num_rx_queues) {
358 rxq = qdev->fp_array[rx_queue_id].rxq;
359 hwfn_index = rx_queue_id % edev->num_hwfns;
360 p_hwfn = &edev->hwfns[hwfn_index];
361 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
363 if (rc != ECORE_SUCCESS) {
364 DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
367 qede_rx_queue_release_mbufs(rxq);
368 qede_rx_queue_reset(qdev, rxq);
369 eth_dev->data->rx_queue_state[rx_queue_id] =
370 RTE_ETH_QUEUE_STATE_STOPPED;
371 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
373 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
380 static struct qede_tx_queue *
381 qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
384 unsigned int socket_id,
385 const struct rte_eth_txconf *tx_conf)
387 struct qede_dev *qdev = dev->data->dev_private;
388 struct ecore_dev *edev = &qdev->edev;
389 struct qede_tx_queue *txq;
391 size_t sw_tx_ring_size;
393 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
394 RTE_CACHE_LINE_SIZE, socket_id);
398 "Unable to allocate memory for txq on socket %u",
403 txq->nb_tx_desc = nb_desc;
405 txq->port_id = dev->data->port_id;
407 rc = qdev->ops->common->chain_alloc(edev,
408 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
409 ECORE_CHAIN_MODE_PBL,
410 ECORE_CHAIN_CNT_TYPE_U16,
412 sizeof(union eth_tx_bd_types),
415 if (rc != ECORE_SUCCESS) {
417 "Unable to allocate memory for txbd ring on socket %u",
419 qede_tx_queue_release(txq);
423 /* Allocate software ring */
424 sw_tx_ring_size = sizeof(*txq->sw_tx_ring) * txq->nb_tx_desc;
425 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
427 RTE_CACHE_LINE_SIZE, socket_id);
429 if (!txq->sw_tx_ring) {
431 "Unable to allocate memory for sw_tx_ring on socket %u",
433 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
434 qede_tx_queue_release(txq);
438 txq->queue_id = queue_idx;
440 txq->nb_tx_avail = txq->nb_tx_desc;
442 txq->tx_free_thresh =
443 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
444 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
447 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
448 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
453 qede_tx_queue_setup(struct rte_eth_dev *dev,
456 unsigned int socket_id,
457 const struct rte_eth_txconf *tx_conf)
459 struct qede_dev *qdev = dev->data->dev_private;
460 struct ecore_dev *edev = &qdev->edev;
461 struct qede_tx_queue *txq;
463 PMD_INIT_FUNC_TRACE(edev);
465 if (!rte_is_power_of_2(nb_desc)) {
466 DP_ERR(edev, "Ring size %u is not a power of 2\n",
471 /* Free memory prior to re-allocation if needed... */
472 if (dev->data->tx_queues[queue_idx] != NULL) {
473 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
474 dev->data->tx_queues[queue_idx] = NULL;
477 if (ECORE_IS_CMT(edev)) {
478 txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
483 qdev->fp_array[queue_idx * 2].txq = txq;
484 txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
489 qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
490 dev->data->tx_queues[queue_idx] =
491 &qdev->fp_array_cmt[queue_idx];
493 txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
498 dev->data->tx_queues[queue_idx] = txq;
499 qdev->fp_array[queue_idx].txq = txq;
506 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
507 struct qede_tx_queue *txq)
509 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
510 ecore_chain_reset(&txq->tx_pbl);
513 *txq->hw_cons_ptr = 0;
516 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
520 if (txq->sw_tx_ring) {
521 for (i = 0; i < txq->nb_tx_desc; i++) {
522 if (txq->sw_tx_ring[i]) {
523 rte_pktmbuf_free(txq->sw_tx_ring[i]);
524 txq->sw_tx_ring[i] = NULL;
530 static void _qede_tx_queue_release(struct qede_dev *qdev,
531 struct ecore_dev *edev,
532 struct qede_tx_queue *txq)
534 qede_tx_queue_release_mbufs(txq);
535 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
536 rte_free(txq->sw_tx_ring);
540 void qede_tx_queue_release(void *tx_queue)
542 struct qede_tx_queue *txq = tx_queue;
543 struct qede_fastpath_cmt *fp_cmt;
544 struct qede_dev *qdev;
545 struct ecore_dev *edev;
549 edev = QEDE_INIT_EDEV(qdev);
550 PMD_INIT_FUNC_TRACE(edev);
552 if (ECORE_IS_CMT(edev)) {
554 _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
555 _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
557 _qede_tx_queue_release(qdev, edev, txq);
562 /* This function allocates fast-path status block memory */
564 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
567 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
568 struct status_block *sb_virt;
572 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
573 sizeof(struct status_block));
575 DP_ERR(edev, "Status block allocation failed\n");
578 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
581 DP_ERR(edev, "Status block initialization failed\n");
582 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
583 sizeof(struct status_block));
590 int qede_alloc_fp_resc(struct qede_dev *qdev)
592 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
593 struct qede_fastpath *fp;
598 PMD_INIT_FUNC_TRACE(edev);
601 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
603 num_sbs = ecore_cxt_get_proto_cid_count
604 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
607 DP_ERR(edev, "No status blocks available\n");
611 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
612 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
614 if (!qdev->fp_array) {
615 DP_ERR(edev, "fp array allocation failed\n");
619 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
620 sizeof(*qdev->fp_array));
622 if (ECORE_IS_CMT(edev)) {
623 qdev->fp_array_cmt = rte_calloc("fp_cmt",
624 QEDE_RXTX_MAX(qdev) / 2,
625 sizeof(*qdev->fp_array_cmt),
626 RTE_CACHE_LINE_SIZE);
628 if (!qdev->fp_array_cmt) {
629 DP_ERR(edev, "fp array for CMT allocation failed\n");
633 memset((void *)qdev->fp_array_cmt, 0,
634 (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
636 /* Establish the mapping of fp_array with fp_array_cmt */
637 for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
638 qdev->fp_array_cmt[i].qdev = qdev;
639 qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
640 qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
644 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
645 fp = &qdev->fp_array[sb_idx];
646 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
647 RTE_CACHE_LINE_SIZE);
649 DP_ERR(edev, "FP sb_info allocation fails\n");
652 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
653 DP_ERR(edev, "FP status block allocation fails\n");
656 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
657 fp->sb_info->igu_sb_id);
663 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
665 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
666 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
667 struct qede_fastpath *fp;
671 PMD_INIT_FUNC_TRACE(edev);
673 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
674 fp = &qdev->fp_array[sb_idx];
676 DP_INFO(edev, "Free sb_info index 0x%x\n",
677 fp->sb_info->igu_sb_id);
678 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
679 fp->sb_info->sb_phys,
680 sizeof(struct status_block));
681 rte_free(fp->sb_info);
686 /* Free packet buffers and ring memories */
687 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
688 if (eth_dev->data->rx_queues[i]) {
689 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
690 eth_dev->data->rx_queues[i] = NULL;
694 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
695 if (eth_dev->data->tx_queues[i]) {
696 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
697 eth_dev->data->tx_queues[i] = NULL;
701 rte_free(qdev->fp_array);
702 qdev->fp_array = NULL;
704 rte_free(qdev->fp_array_cmt);
705 qdev->fp_array_cmt = NULL;
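/* Publish the current BD and CQE producer indices to the device's internal
 * RAM so that firmware can start using the newly posted Rx buffers.
 */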
709 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
710 struct qede_rx_queue *rxq)
712 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
713 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
714 struct eth_rx_prod_data rx_prods;
716 /* Update producers */
717 memset(&rx_prods, 0, sizeof(rx_prods));
718 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
719 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
721 /* Make sure that the BD and SGE data is updated before updating the
722 * producers since FW might read the BD/SGE right after the producer
727 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
728 (uint32_t *)&rx_prods);
730 /* mmiowb is needed to synchronize doorbell writes from more than one
731 * processor. It guarantees that the write arrives to the device before
732 * the napi lock is released and another qede_poll is called (possibly
733 * on another CPU). Without this barrier, the next doorbell can bypass
734 * this doorbell. This is applicable to IA64/Altix systems.
738 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
741 /* Starts a given RX queue in HW */
743 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
745 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
746 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
747 struct ecore_queue_start_common_params params;
748 struct ecore_rxq_start_ret_params ret_params;
749 struct qede_rx_queue *rxq;
750 struct qede_fastpath *fp;
751 struct ecore_hwfn *p_hwfn;
752 dma_addr_t p_phys_table;
758 if (rx_queue_id < qdev->num_rx_queues) {
759 fp = &qdev->fp_array[rx_queue_id];
761 /* Allocate buffers for the Rx ring */
762 for (j = 0; j < rxq->nb_rx_desc; j++) {
763 rc = qede_alloc_rx_buffer(rxq);
765 DP_ERR(edev, "RX buffer allocation failed"
766 " for rxq = %u\n", rx_queue_id);
770 /* disable interrupts */
771 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
773 memset(&params, 0, sizeof(params));
774 params.queue_id = rx_queue_id / edev->num_hwfns;
776 params.stats_id = params.vport_id;
777 params.p_sb = fp->sb_info;
778 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
779 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
780 params.sb_idx = RX_PI;
781 hwfn_index = rx_queue_id % edev->num_hwfns;
782 p_hwfn = &edev->hwfns[hwfn_index];
783 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
784 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
785 memset(&ret_params, 0, sizeof(ret_params));
786 rc = ecore_eth_rx_queue_start(p_hwfn,
787 p_hwfn->hw_info.opaque_fid,
788 &params, fp->rxq->rx_buf_size,
789 fp->rxq->rx_bd_ring.p_phys_addr,
790 p_phys_table, page_cnt,
793 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
797 /* Update with the returned parameters */
798 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
799 fp->rxq->handle = ret_params.p_handle;
801 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
802 qede_update_rx_prod(qdev, fp->rxq);
803 eth_dev->data->rx_queue_state[rx_queue_id] =
804 RTE_ETH_QUEUE_STATE_STARTED;
805 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
807 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
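/* Starts a given TX queue in the HW: issues the queue-start ramrod on the
 * owning hwfn and initializes the doorbell data used by the transmit path.
 */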
815 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
817 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
818 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
819 struct ecore_queue_start_common_params params;
820 struct ecore_txq_start_ret_params ret_params;
821 struct ecore_hwfn *p_hwfn;
822 dma_addr_t p_phys_table;
823 struct qede_tx_queue *txq;
824 struct qede_fastpath *fp;
829 if (tx_queue_id < qdev->num_tx_queues) {
830 fp = &qdev->fp_array[tx_queue_id];
832 memset(&params, 0, sizeof(params));
833 params.queue_id = tx_queue_id / edev->num_hwfns;
835 params.stats_id = params.vport_id;
836 params.p_sb = fp->sb_info;
837 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
838 fp->txq->queue_id, fp->sb_info->igu_sb_id);
839 params.sb_idx = TX_PI(0); /* tc = 0 */
840 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
841 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
842 hwfn_index = tx_queue_id % edev->num_hwfns;
843 p_hwfn = &edev->hwfns[hwfn_index];
844 if (qdev->dev_info.is_legacy)
845 fp->txq->is_legacy = true;
846 rc = ecore_eth_tx_queue_start(p_hwfn,
847 p_hwfn->hw_info.opaque_fid,
849 p_phys_table, page_cnt,
851 if (rc != ECORE_SUCCESS) {
852 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
856 txq->doorbell_addr = ret_params.p_doorbell;
857 txq->handle = ret_params.p_handle;
859 txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
860 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
862 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
864 SET_FIELD(txq->tx_db.data.params,
865 ETH_DB_DATA_AGG_VAL_SEL,
866 DQ_XCM_ETH_TX_BD_PROD_CMD);
867 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
868 eth_dev->data->tx_queue_state[tx_queue_id] =
869 RTE_ETH_QUEUE_STATE_STARTED;
870 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
872 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
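/* Free one completed TX packet: consume one BD from the PBL chain per
 * segment and release the mbuf stored at the software consumer index
 * (or just consume the BD if the slot is empty).
 */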
880 qede_free_tx_pkt(struct qede_tx_queue *txq)
882 struct rte_mbuf *mbuf;
887 mbuf = txq->sw_tx_ring[idx];
889 nb_segs = mbuf->nb_segs;
890 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
892 /* It's like consuming rxbuf in recv() */
893 ecore_chain_consume(&txq->tx_pbl);
897 rte_pktmbuf_free(mbuf);
898 txq->sw_tx_ring[idx] = NULL;
900 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
902 ecore_chain_consume(&txq->tx_pbl);
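/* Process TX completions: free transmitted packets until the driver's
 * chain consumer catches up with the hardware BD consumer index read from
 * the status block.
 */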
908 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
909 struct qede_tx_queue *txq)
912 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
916 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
917 /* read barrier prevents speculative execution on stale data */
920 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
921 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
922 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
923 abs(hw_bd_cons - sw_tx_cons));
925 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
926 qede_free_tx_pkt(txq);
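/* Wait for a TX queue to drain (sw cons == sw prod); if it appears stuck,
 * optionally request the MCP to drain once and retry, otherwise report a
 * timeout.
 */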
929 static int qede_drain_txq(struct qede_dev *qdev,
930 struct qede_tx_queue *txq, bool allow_drain)
932 struct ecore_dev *edev = &qdev->edev;
935 while (txq->sw_tx_cons != txq->sw_tx_prod) {
936 qede_process_tx_compl(edev, txq);
939 DP_ERR(edev, "Tx queue[%u] is stuck, "
940 "requesting MCP to drain\n",
942 rc = qdev->ops->common->drain(edev);
945 return qede_drain_txq(qdev, txq, false);
947 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
948 "PROD=%d, CONS=%d\n",
949 txq->queue_id, txq->sw_tx_prod,
955 rte_compiler_barrier();
958 /* FW finished processing, wait for HW to transmit all tx packets */
964 /* Stops a given TX queue in the HW */
965 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
967 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
968 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
969 struct ecore_hwfn *p_hwfn;
970 struct qede_tx_queue *txq;
974 if (tx_queue_id < qdev->num_tx_queues) {
975 txq = qdev->fp_array[tx_queue_id].txq;
977 if (qede_drain_txq(qdev, txq, true))
978 return -1; /* For the lack of retcodes */
980 hwfn_index = tx_queue_id % edev->num_hwfns;
981 p_hwfn = &edev->hwfns[hwfn_index];
982 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
983 if (rc != ECORE_SUCCESS) {
984 DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
987 qede_tx_queue_release_mbufs(txq);
988 qede_tx_queue_reset(qdev, txq);
989 eth_dev->data->tx_queue_state[tx_queue_id] =
990 RTE_ETH_QUEUE_STATE_STOPPED;
991 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
993 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
1000 int qede_start_queues(struct rte_eth_dev *eth_dev)
1002 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1006 for (id = 0; id < qdev->num_rx_queues; id++) {
1007 rc = qede_rx_queue_start(eth_dev, id);
1008 if (rc != ECORE_SUCCESS)
1012 for (id = 0; id < qdev->num_tx_queues; id++) {
1013 rc = qede_tx_queue_start(eth_dev, id);
1014 if (rc != ECORE_SUCCESS)
1021 void qede_stop_queues(struct rte_eth_dev *eth_dev)
1023 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1026 /* Stopping RX/TX queues */
1027 for (id = 0; id < qdev->num_tx_queues; id++)
1028 qede_tx_queue_stop(eth_dev, id);
1030 for (id = 0; id < qdev->num_rx_queues; id++)
1031 qede_rx_queue_stop(eth_dev, id);
1034 static inline bool qede_tunn_exist(uint16_t flag)
1036 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
1037 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
1040 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
1042 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
1043 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
1047 * qede_check_tunn_csum_l4:
1049  * Returns 1 if L4 csum is enabled AND the validation has failed, 0 otherwise.
1052 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
1054 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
1055 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
1056 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
1057 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
1062 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
1064 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1065 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
1066 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1067 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
1072 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
1073 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
1075 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1076 struct rte_ether_hdr *eth_hdr;
1077 struct rte_ipv4_hdr *ipv4_hdr;
1078 struct rte_ipv6_hdr *ipv6_hdr;
1079 struct rte_vlan_hdr *vlan_hdr;
1081 bool vlan_tagged = 0;
1084 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1085 len = sizeof(struct rte_ether_hdr);
1086 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
1088 /* Note: Valid only if VLAN stripping is disabled */
1089 if (ethertype == RTE_ETHER_TYPE_VLAN) {
1091 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
1092 len += sizeof(struct rte_vlan_hdr);
1093 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
1096 if (ethertype == RTE_ETHER_TYPE_IPV4) {
1097 packet_type |= RTE_PTYPE_L3_IPV4;
1098 ipv4_hdr = rte_pktmbuf_mtod_offset(m,
1099 struct rte_ipv4_hdr *, len);
1100 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
1101 packet_type |= RTE_PTYPE_L4_TCP;
1102 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
1103 packet_type |= RTE_PTYPE_L4_UDP;
1104 } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
1105 packet_type |= RTE_PTYPE_L3_IPV6;
1106 ipv6_hdr = rte_pktmbuf_mtod_offset(m,
1107 struct rte_ipv6_hdr *, len);
1108 if (ipv6_hdr->proto == IPPROTO_TCP)
1109 packet_type |= RTE_PTYPE_L4_TCP;
1110 else if (ipv6_hdr->proto == IPPROTO_UDP)
1111 packet_type |= RTE_PTYPE_L4_UDP;
1115 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1117 packet_type |= RTE_PTYPE_L2_ETHER;
1122 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
1127 static const uint32_t
1128 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1129 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
1130 RTE_PTYPE_INNER_L2_ETHER,
1131 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
1132 RTE_PTYPE_INNER_L2_ETHER,
1133 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
1134 RTE_PTYPE_INNER_L4_TCP |
1135 RTE_PTYPE_INNER_L2_ETHER,
1136 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
1137 RTE_PTYPE_INNER_L4_TCP |
1138 RTE_PTYPE_INNER_L2_ETHER,
1139 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
1140 RTE_PTYPE_INNER_L4_UDP |
1141 RTE_PTYPE_INNER_L2_ETHER,
1142 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
1143 RTE_PTYPE_INNER_L4_UDP |
1144 RTE_PTYPE_INNER_L2_ETHER,
1145 /* Frags with no VLAN */
1146 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1147 RTE_PTYPE_INNER_L4_FRAG |
1148 RTE_PTYPE_INNER_L2_ETHER,
1149 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1150 RTE_PTYPE_INNER_L4_FRAG |
1151 RTE_PTYPE_INNER_L2_ETHER,
1153 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1154 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1155 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1156 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1157 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1158 RTE_PTYPE_INNER_L4_TCP |
1159 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1160 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1161 RTE_PTYPE_INNER_L4_TCP |
1162 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1163 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1164 RTE_PTYPE_INNER_L4_UDP |
1165 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1166 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1167 RTE_PTYPE_INNER_L4_UDP |
1168 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1169 /* Frags with VLAN */
1170 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1171 RTE_PTYPE_INNER_L4_FRAG |
1172 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1173 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1174 RTE_PTYPE_INNER_L4_FRAG |
1175 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1178 /* Bits (0..3) provide the L3/L4 protocol type */
1179 /* Bits (4,5) provide the frag and VLAN info */
1180 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1181 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1182 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1183 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1184 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1185 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1186 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1187 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1189 if (val < QEDE_PKT_TYPE_MAX)
1190 return ptype_lkup_tbl[val];
1192 return RTE_PTYPE_UNKNOWN;
1195 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1200 static const uint32_t
1201 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1202 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1203 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1204 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
1207 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
1210 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
1213 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1216 /* Frags with no VLAN */
1217 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1220 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1224 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1225 RTE_PTYPE_L2_ETHER_VLAN,
1226 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1227 RTE_PTYPE_L2_ETHER_VLAN,
1228 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1230 RTE_PTYPE_L2_ETHER_VLAN,
1231 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1233 RTE_PTYPE_L2_ETHER_VLAN,
1234 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1236 RTE_PTYPE_L2_ETHER_VLAN,
1237 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1239 RTE_PTYPE_L2_ETHER_VLAN,
1240 /* Frags with VLAN */
1241 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1243 RTE_PTYPE_L2_ETHER_VLAN,
1244 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1246 RTE_PTYPE_L2_ETHER_VLAN,
1249 /* Bits (0..3) provide the L3/L4 protocol type */
1250 /* Bits (4,5) provide the frag and VLAN info */
1251 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1252 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1253 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1254 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1255 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1256 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1257 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1258 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1260 if (val < QEDE_PKT_TYPE_MAX)
1261 return ptype_lkup_tbl[val];
1263 return RTE_PTYPE_UNKNOWN;
1266 static inline uint8_t
1267 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1269 struct rte_ipv4_hdr *ip;
1274 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1275 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1277 if (unlikely(val)) {
1278 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1279 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1280 ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
1281 sizeof(struct rte_ether_hdr));
1282 pkt_csum = ip->hdr_checksum;
1283 ip->hdr_checksum = 0;
1284 calc_csum = rte_ipv4_cksum(ip);
1285 ip->hdr_checksum = pkt_csum;
1286 return (calc_csum != pkt_csum);
1287 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1294 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1296 ecore_chain_consume(&rxq->rx_bd_ring);
1301 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1302 struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
1304 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1305 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
1306 dma_addr_t new_mapping;
1308 rxq->sw_rx_ring[idx] = curr_cons;
1310 new_mapping = rte_mbuf_data_iova_default(curr_cons);
1312 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1313 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
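/* Re-post 'count' already-consumed Rx buffers back to the producer side of
 * the BD ring without allocating new mbufs; used when a received packet
 * cannot be processed, so the ring stays full.
 */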
1319 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1320 struct qede_dev *qdev, uint8_t count)
1322 struct rte_mbuf *curr_cons;
1324 for (; count > 0; count--) {
1325 curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1326 qede_reuse_page(qdev, rxq, curr_cons);
1327 qede_rx_bd_ring_consume(rxq);
1332 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1333 struct qede_rx_queue *rxq,
1334 uint8_t agg_index, uint16_t len)
1336 struct qede_agg_info *tpa_info;
1337 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1340 /* Under certain conditions the FW may not consume an additional
1341  * or new BD, so the decision to consume the BD must be made based
1342  * on len_list[0].
1344 if (rte_le_to_cpu_16(len)) {
1345 tpa_info = &rxq->tpa_info[agg_index];
1346 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1347 curr_frag = rxq->sw_rx_ring[cons_idx];
1349 curr_frag->nb_segs = 1;
1350 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1351 curr_frag->data_len = curr_frag->pkt_len;
1352 tpa_info->tpa_tail->next = curr_frag;
1353 tpa_info->tpa_tail = curr_frag;
1354 qede_rx_bd_ring_consume(rxq);
1355 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1356 PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
1357 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1358 rxq->rx_alloc_errors++;
1364 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1365 struct qede_rx_queue *rxq,
1366 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1368 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1369 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1370 /* only len_list[0] will have a value */
1371 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1376 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1377 struct qede_rx_queue *rxq,
1378 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1380 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1382 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1384 /* Update total length and frags based on end TPA */
1385 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1386 /* TODO: Add Sanity Checks */
1387 rx_mb->nb_segs = cqe->num_of_bds;
1388 rx_mb->pkt_len = cqe->total_packet_len;
1390 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1391 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1392 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1396 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1401 static const uint32_t
1402 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1403 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1404 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1405 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1406 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1407 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1408 RTE_PTYPE_TUNNEL_GENEVE,
1409 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1410 RTE_PTYPE_TUNNEL_GRE,
1411 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1412 RTE_PTYPE_TUNNEL_VXLAN,
1413 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1414 RTE_PTYPE_TUNNEL_GENEVE,
1415 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1416 RTE_PTYPE_TUNNEL_GRE,
1417 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1418 RTE_PTYPE_TUNNEL_VXLAN,
1419 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1420 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1421 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1422 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1423 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1424 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1425 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1426 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1427 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1428 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1429 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1430 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1431 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1432 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1433 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1434 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1435 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1436 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1437 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1438 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1439 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1440 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1441 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1442 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1445 /* Cover bits[4-0] to include tunn_type and next protocol */
1446 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1447 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1448 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1449 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1451 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1452 return ptype_tunn_lkup_tbl[val];
1454 return RTE_PTYPE_UNKNOWN;
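/* Attach the remaining BDs of a scattered (multi-BD) packet to the head
 * mbuf: walk 'num_segs' software ring entries, set each segment's data_len,
 * link it into the chain and consume its BD, recycling the BDs if a
 * zero-length segment is encountered.
 */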
1458 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1459 uint8_t num_segs, uint16_t pkt_len)
1461 struct qede_rx_queue *rxq = p_rxq;
1462 struct qede_dev *qdev = rxq->qdev;
1463 register struct rte_mbuf *seg1 = NULL;
1464 register struct rte_mbuf *seg2 = NULL;
1465 uint16_t sw_rx_index;
1470 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1472 if (unlikely(!cur_size)) {
1473 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1474 " left for mapping jumbo\n", num_segs);
1475 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1478 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1479 seg2 = rxq->sw_rx_ring[sw_rx_index];
1480 qede_rx_bd_ring_consume(rxq);
1481 pkt_len -= cur_size;
1482 seg2->data_len = cur_size;
1492 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1494 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1497 PMD_RX_LOG(INFO, rxq,
1498 "len 0x%04x bf 0x%04x hash_val 0x%x"
1499 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1500 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1501 m->data_len, bitfield, m->hash.rss,
1502 (unsigned long)m->ol_flags,
1503 rte_get_ptype_l2_name(m->packet_type),
1504 rte_get_ptype_l3_name(m->packet_type),
1505 rte_get_ptype_l4_name(m->packet_type),
1506 rte_get_ptype_tunnel_name(m->packet_type),
1507 rte_get_ptype_inner_l2_name(m->packet_type),
1508 rte_get_ptype_inner_l3_name(m->packet_type),
1509 rte_get_ptype_inner_l4_name(m->packet_type));
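/* Regular (non-LRO) receive burst handler. Buffers consumed by the previous
 * call are replenished first (bulk allocation capped at
 * QEDE_MAX_BULK_ALLOC_COUNT), then CQEs are processed up to the hardware
 * completion index: parsing flags are translated into packet_type and
 * checksum/VLAN/RSS ol_flags, and the mbuf length fields are filled from
 * the CQE.
 */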
1514 qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1516 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1517 register struct rte_mbuf *rx_mb = NULL;
1518 struct qede_rx_queue *rxq = p_rxq;
1519 struct qede_dev *qdev = rxq->qdev;
1520 struct ecore_dev *edev = &qdev->edev;
1521 union eth_rx_cqe *cqe;
1523 enum eth_rx_cqe_type cqe_type;
1524 int rss_enable = qdev->rss_enable;
1525 int rx_alloc_count = 0;
1526 uint32_t packet_type;
1528 uint16_t vlan_tci, port_id;
1529 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
1530 uint16_t rx_pkt = 0;
1531 uint16_t pkt_len = 0;
1532 uint16_t len; /* Length of first BD */
1533 uint16_t preload_idx;
1534 uint16_t parse_flag;
1535 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1536 uint8_t bitfield_val;
1538 uint8_t offset, flags, bd_num;
1541 /* Allocate the buffers that were consumed in the previous loop */
1542 if (rxq->rx_alloc_count) {
1543 count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
1544 QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
1546 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
1547 struct rte_eth_dev *dev;
1549 PMD_RX_LOG(ERR, rxq,
1550 "New buffer allocation failed, "
1551 "dropping incoming packets\n");
1552 dev = &rte_eth_devices[rxq->port_id];
1553 dev->data->rx_mbuf_alloc_failed += count;
1554 rxq->rx_alloc_errors += count;
1557 qede_update_rx_prod(qdev, rxq);
1558 rxq->rx_alloc_count -= count;
1561 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1562 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1566 if (hw_comp_cons == sw_comp_cons)
1569 num_rx_bds = NUM_RX_BDS(rxq);
1570 port_id = rxq->port_id;
1572 while (sw_comp_cons != hw_comp_cons) {
1574 packet_type = RTE_PTYPE_UNKNOWN;
1578 /* Get the CQE from the completion ring */
1580 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1581 cqe_type = cqe->fast_path_regular.type;
1582 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1584 if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
1585 fp_cqe = &cqe->fast_path_regular;
1587 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
1588 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1589 ecore_eth_cqe_completion
1590 (&edev->hwfns[rxq->queue_id %
1592 (struct eth_slow_path_rx_cqe *)cqe);
1597 /* Get the data from the SW ring */
1598 sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
1599 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1600 assert(rx_mb != NULL);
1602 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1603 offset = fp_cqe->placement_offset;
1604 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1605 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1606 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1607 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1608 bd_num = fp_cqe->bd_num;
1609 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1610 bitfield_val = fp_cqe->bitfields;
1613 if (unlikely(qede_tunn_exist(parse_flag))) {
1614 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1615 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1616 PMD_RX_LOG(ERR, rxq,
1617 "L4 csum failed, flags = 0x%x\n",
1619 rxq->rx_hw_errors++;
1620 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1622 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1625 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1626 PMD_RX_LOG(ERR, rxq,
1627 "Outer L3 csum failed, flags = 0x%x\n",
1629 rxq->rx_hw_errors++;
1630 ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1632 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1635 flags = fp_cqe->tunnel_pars_flags.flags;
1639 qede_rx_cqe_to_tunn_pkt_type(flags);
1643 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1645 /* Outer L3/L4 types are not available in the CQE */
1646 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1648 /* Outer L3/L4 types are not available in the CQE.
1649  * Need to add the placement offset so the headers parse correctly.
1651 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1652 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1654 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1657 /* Common handling for non-tunnel packets and for inner
1658 * headers in the case of tunnel.
1660 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1661 PMD_RX_LOG(ERR, rxq,
1662 "L4 csum failed, flags = 0x%x\n",
1664 rxq->rx_hw_errors++;
1665 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1667 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1669 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1670 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1672 rxq->rx_hw_errors++;
1673 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1675 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1678 if (unlikely(CQE_HAS_VLAN(parse_flag) ||
1679 CQE_HAS_OUTER_VLAN(parse_flag))) {
1680 /* Note: FW doesn't indicate Q-in-Q packet */
1681 ol_flags |= RTE_MBUF_F_RX_VLAN;
1682 if (qdev->vlan_strip_flg) {
1683 ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
1684 rx_mb->vlan_tci = vlan_tci;
1689 ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1690 rx_mb->hash.rss = rss_hash;
1694 qede_rx_bd_ring_consume(rxq);
1696 /* Prefetch next mbuf while processing current one. */
1697 preload_idx = rxq->sw_rx_cons & num_rx_bds;
1698 rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
1700 /* Update rest of the MBUF fields */
1701 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1702 rx_mb->port = port_id;
1703 rx_mb->ol_flags = ol_flags;
1704 rx_mb->data_len = len;
1705 rx_mb->packet_type = packet_type;
1706 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1707 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1709 rx_mb->nb_segs = bd_num;
1710 rx_mb->pkt_len = pkt_len;
1712 rx_pkts[rx_pkt] = rx_mb;
1716 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1717 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1718 if (rx_pkt == nb_pkts) {
1719 PMD_RX_LOG(DEBUG, rxq,
1720 "Budget reached nb_pkts=%u received=%u",
1726 /* Request the number of buffers to be allocated in the next loop */
1727 rxq->rx_alloc_count += rx_alloc_count;
1729 rxq->rcv_pkts += rx_pkt;
1730 rxq->rx_segs += rx_pkt;
1731 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1737 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1739 struct qede_rx_queue *rxq = p_rxq;
1740 struct qede_dev *qdev = rxq->qdev;
1741 struct ecore_dev *edev = &qdev->edev;
1742 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1743 uint16_t rx_pkt = 0;
1744 union eth_rx_cqe *cqe;
1745 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1746 register struct rte_mbuf *rx_mb = NULL;
1747 register struct rte_mbuf *seg1 = NULL;
1748 enum eth_rx_cqe_type cqe_type;
1749 uint16_t pkt_len = 0; /* Sum of all BD segments */
1750 uint16_t len; /* Length of first BD */
1751 uint8_t num_segs = 1;
1752 uint16_t preload_idx;
1753 uint16_t parse_flag;
1754 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1755 uint8_t bitfield_val;
1757 uint8_t tunn_parse_flag;
1758 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1760 uint32_t packet_type;
1763 uint8_t offset, tpa_agg_idx, flags;
1764 struct qede_agg_info *tpa_info = NULL;
1766 int rx_alloc_count = 0;
1769 /* Allocate the buffers that were consumed in the previous loop */
1770 if (rxq->rx_alloc_count) {
1771 count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
1772 QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
1774 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
1775 struct rte_eth_dev *dev;
1777 PMD_RX_LOG(ERR, rxq,
1778 "New buffer allocation failed, "
1779 "dropping incoming packets\n");
1780 dev = &rte_eth_devices[rxq->port_id];
1781 dev->data->rx_mbuf_alloc_failed += count;
1782 rxq->rx_alloc_errors += count;
1785 qede_update_rx_prod(qdev, rxq);
1786 rxq->rx_alloc_count -= count;
1789 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1790 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1794 if (hw_comp_cons == sw_comp_cons)
1797 while (sw_comp_cons != hw_comp_cons) {
1799 packet_type = RTE_PTYPE_UNKNOWN;
1801 tpa_start_flg = false;
1804 /* Get the CQE from the completion ring */
1806 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1807 cqe_type = cqe->fast_path_regular.type;
1808 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1811 case ETH_RX_CQE_TYPE_REGULAR:
1812 fp_cqe = &cqe->fast_path_regular;
1814 case ETH_RX_CQE_TYPE_TPA_START:
1815 cqe_start_tpa = &cqe->fast_path_tpa_start;
1816 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1817 tpa_start_flg = true;
1818 /* Mark it as LRO packet */
1819 ol_flags |= RTE_MBUF_F_RX_LRO;
1820 /* In split mode, seg_len is the same as len_on_first_bd
1821  * and bw_ext_bd_len_list will be empty since there are
1822  * no additional buffers.
1824 PMD_RX_LOG(INFO, rxq,
1825 "TPA start[%d] - len_on_first_bd %d header %d"
1826 " [bd_list[0] %d], [seg_len %d]\n",
1827 cqe_start_tpa->tpa_agg_index,
1828 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1829 cqe_start_tpa->header_len,
1830 rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
1831 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1834 case ETH_RX_CQE_TYPE_TPA_CONT:
1835 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1836 &cqe->fast_path_tpa_cont);
1838 case ETH_RX_CQE_TYPE_TPA_END:
1839 qede_rx_process_tpa_end_cqe(qdev, rxq,
1840 &cqe->fast_path_tpa_end);
1841 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1842 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1843 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1845 case ETH_RX_CQE_TYPE_SLOW_PATH:
1846 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1847 ecore_eth_cqe_completion(
1848 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1849 (struct eth_slow_path_rx_cqe *)cqe);
1855 /* Get the data from the SW ring */
1856 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1857 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1858 assert(rx_mb != NULL);
1860 /* Handle regular CQE or TPA start CQE */
1861 if (!tpa_start_flg) {
1862 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1863 offset = fp_cqe->placement_offset;
1864 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1865 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1866 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1867 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1868 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1869 bitfield_val = fp_cqe->bitfields;
1873 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1874 offset = cqe_start_tpa->placement_offset;
1875 /* seg_len = len_on_first_bd */
1876 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1877 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1878 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1879 bitfield_val = cqe_start_tpa->bitfields;
1881 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1883 if (qede_tunn_exist(parse_flag)) {
1884 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1885 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1886 PMD_RX_LOG(ERR, rxq,
1887 "L4 csum failed, flags = 0x%x\n",
1889 rxq->rx_hw_errors++;
1890 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1892 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1895 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1896 PMD_RX_LOG(ERR, rxq,
1897 "Outer L3 csum failed, flags = 0x%x\n",
1899 rxq->rx_hw_errors++;
1900 ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1902 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1906 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1908 flags = fp_cqe->tunnel_pars_flags.flags;
1909 tunn_parse_flag = flags;
1913 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1917 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1919 /* Outer L3/L4 types are not available in the CQE */
1920 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1922 /* Outer L3/L4 types are not available in the CQE.
1923  * Need to add the placement offset so the headers parse correctly.
1925 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1926 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1928 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1931 /* Common handling for non-tunnel packets and for inner
1932 * headers in the case of tunnel.
1934 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1935 PMD_RX_LOG(ERR, rxq,
1936 "L4 csum failed, flags = 0x%x\n",
1938 rxq->rx_hw_errors++;
1939 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1941 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1943 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1944 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1946 rxq->rx_hw_errors++;
1947 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1949 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1952 if (CQE_HAS_VLAN(parse_flag) ||
1953 CQE_HAS_OUTER_VLAN(parse_flag)) {
1954 /* Note: FW doesn't indicate Q-in-Q packet */
1955 ol_flags |= RTE_MBUF_F_RX_VLAN;
1956 if (qdev->vlan_strip_flg) {
1957 ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
1958 rx_mb->vlan_tci = vlan_tci;
1963 if (qdev->rss_enable) {
1964 ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1965 rx_mb->hash.rss = rss_hash;
1969 qede_rx_bd_ring_consume(rxq);
1971 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1972 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1973 " len on first: %04x Total Len: %04x",
1974 fp_cqe->bd_num, len, pkt_len);
1975 num_segs = fp_cqe->bd_num - 1;
1977 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1981 rx_alloc_count += num_segs;
1982 rxq->rx_segs += num_segs;
1984 rxq->rx_segs++; /* for the first segment */
1986 /* Prefetch next mbuf while processing current one. */
1987 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1988 rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
1990 /* Update rest of the MBUF fields */
1991 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1992 rx_mb->port = rxq->port_id;
1993 rx_mb->ol_flags = ol_flags;
1994 rx_mb->data_len = len;
1995 rx_mb->packet_type = packet_type;
1996 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1997 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1999 if (!tpa_start_flg) {
2000 rx_mb->nb_segs = fp_cqe->bd_num;
2001 rx_mb->pkt_len = pkt_len;
2003 /* store ref to the updated mbuf */
2004 tpa_info->tpa_head = rx_mb;
2005 tpa_info->tpa_tail = tpa_info->tpa_head;
2007 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
2009 if (!tpa_start_flg) {
2010 rx_pkts[rx_pkt] = rx_mb;
2014 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
2015 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2016 if (rx_pkt == nb_pkts) {
2017 PMD_RX_LOG(DEBUG, rxq,
2018 "Budget reached nb_pkts=%u received=%u",
2024 /* Request the number of buffers to be allocated in the next loop */
2025 rxq->rx_alloc_count += rx_alloc_count;
2027 rxq->rcv_pkts += rx_pkt;
2029 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
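/* 100G CMT receive handler: split the requested burst between the Rx
 * queues of the two engines and return the combined count.
 */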
2035 qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2037 struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2038 uint16_t eng0_pkts, eng1_pkts;
2040 eng0_pkts = nb_pkts / 2;
2042 eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
2044 eng1_pkts = nb_pkts - eng0_pkts;
2046 eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
2049 return eng0_pkts + eng1_pkts;
2052 /* Populate scatter gather buffer descriptor fields */
2053 static inline uint16_t
2054 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
2055 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
2058 struct qede_tx_queue *txq = p_txq;
2059 struct eth_tx_bd *tx_bd = NULL;
2061 uint16_t nb_segs = 0;
2063 /* Check for scattered buffers */
2065 if (start_seg == 0) {
2067 *bd2 = (struct eth_tx_2nd_bd *)
2068 ecore_chain_produce(&txq->tx_pbl);
2069 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
2072 mapping = rte_mbuf_data_iova(m_seg);
2073 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
2074 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
2075 } else if (start_seg == 1) {
2077 *bd3 = (struct eth_tx_3rd_bd *)
2078 ecore_chain_produce(&txq->tx_pbl);
2079 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
2082 mapping = rte_mbuf_data_iova(m_seg);
2083 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
2084 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
2086 tx_bd = (struct eth_tx_bd *)
2087 ecore_chain_produce(&txq->tx_pbl);
2088 memset(tx_bd, 0, sizeof(*tx_bd));
2090 mapping = rte_mbuf_data_iova(m_seg);
2091 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
2092 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
2095 m_seg = m_seg->next;
2098 /* Return the total number of scattered buffers */
2102 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2104 print_tx_bd_info(struct qede_tx_queue *txq,
2105 struct eth_tx_1st_bd *bd1,
2106 struct eth_tx_2nd_bd *bd2,
2107 struct eth_tx_3rd_bd *bd3,
2108 uint64_t tx_ol_flags)
2110 char ol_buf[256] = { 0 }; /* for verbose prints */
2113 PMD_TX_LOG(INFO, txq,
2114 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
2115 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
2116 bd1->data.bd_flags.bitfields,
2117 rte_cpu_to_le_16(bd1->data.bitfields));
2119 PMD_TX_LOG(INFO, txq,
2120 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
2121 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
2122 bd2->data.bitfields2, bd2->data.tunn_ip_size);
2124 PMD_TX_LOG(INFO, txq,
2125 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
2126 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
2127 rte_cpu_to_le_16(bd3->nbytes),
2128 rte_cpu_to_le_16(bd3->data.bitfields),
2129 rte_cpu_to_le_16(bd3->data.lso_mss),
2130 bd3->data.tunn_l4_hdr_start_offset_w,
2131 bd3->data.tunn_hdr_size_w);
2133 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
2134 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
2138 /* TX prepare: check that packets meet TX conditions */
2140 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2141 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
2144 struct qede_tx_queue *txq = p_txq;
2146 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
2153 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2157 for (i = 0; i < nb_pkts; i++) {
2159 ol_flags = m->ol_flags;
2160 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2161 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
2165 /* TBD: confirm it's ~9700B for both? */
2166 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
2171 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
2176 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
2177 /* We support only limited tunnel protocols */
2178 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2181 temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
2182 if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
2183 temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
2184 temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
2185 temp == RTE_MBUF_F_TX_TUNNEL_GRE)
2189 rte_errno = ENOTSUP;
2193 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2194 ret = rte_validate_tx_offload(m);
2202 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2203 if (unlikely(i != nb_pkts))
2204 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
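/* Illustrative usage sketch (not part of the driver; port_id, queue_id and
 * the pkts array are placeholders): applications typically run this prepare
 * callback through the generic ethdev API before transmitting, e.g.:
 *
 *	uint16_t nb_prep, nb_sent;
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep < nb_pkts)
 *		handle_unsupported(pkts[nb_prep], rte_errno); // hypothetical handler
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * For qede ports this runs qede_xmit_prep_pkts(), which stops at the first
 * packet that fails the checks above and reports the reason via rte_errno.
 */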
2210 #define MPLSINUDP_HDR_SIZE (12)
2212 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2214 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
2215 struct qede_tx_queue *txq)
2217 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
2218 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
2219 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
2220 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
2221 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
2222 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
2223 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
2224 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
2225 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
2226 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
2227 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
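/* Illustrative arithmetic (hypothetical packet): with a plain Ethernet +
 * IPv4 outer header (outer_l2_len = 14, outer_l3_len = 20) and
 * MPLSINUDP_HDR_SIZE = 12 (presumably the 8-byte UDP header plus one 4-byte
 * MPLS label), the fields checked above become
 * tunn_l4_hdr_start_offset = (14 + 20) / 2 = 17 words and
 * tunn_hdr_size = (14 + 20 + 12) / 2 = 23 words, both well below the 8-bit
 * limit; the overflow warnings only trigger for unusually large or mis-set
 * header lengths.
 */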
2232 qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2234 struct qede_tx_queue *txq = p_txq;
2235 struct qede_dev *qdev = txq->qdev;
2236 struct ecore_dev *edev = &qdev->edev;
2237 struct eth_tx_1st_bd *bd1;
2238 struct eth_tx_2nd_bd *bd2;
2239 struct eth_tx_3rd_bd *bd3;
2240 struct rte_mbuf *m_seg = NULL;
2241 struct rte_mbuf *mbuf;
2242 struct rte_mbuf **sw_tx_ring;
2243 uint16_t nb_tx_pkts;
2246 uint16_t nb_frags = 0;
2247 uint16_t nb_pkt_sent = 0;
2249 uint64_t tx_ol_flags;
2252 uint8_t bd1_bd_flags_bf;
2254 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2255 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2256 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2257 qede_process_tx_compl(edev, txq);
2260 nb_tx_pkts = nb_pkts;
2261 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2262 sw_tx_ring = txq->sw_tx_ring;
2264 while (nb_tx_pkts--) {
2265 /* Init flags/values */
2271 bd1_bd_flags_bf = 0;
2278 /* Make sure enough TX BDs are free for this mbuf's segments */
2279 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2282 tx_ol_flags = mbuf->ol_flags;
2283 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2285 if (unlikely(txq->nb_tx_avail <
2286 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2289 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2290 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2292 /* Offload the IP checksum in the hardware */
2293 if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
2295 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2297 /* L4 checksum offload (tcp or udp) */
2298 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2299 (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
2301 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
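/* Illustrative example (hypothetical mbuf): an IPv4/TCP packet submitted
 * with ol_flags = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 * RTE_MBUF_F_TX_TCP_CKSUM takes both branches above, so bd1_bd_flags_bf ends
 * up with the IP_CSUM and L4_CSUM bits set and the NIC computes both
 * checksums.
 */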
2303 /* Fill the entry in the SW ring and the BDs in the FW ring */
2305 sw_tx_ring[idx] = mbuf;
2308 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2309 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2312 /* Map MBUF linear data for DMA and set in the BD1 */
2313 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2315 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2316 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2318 /* Handle fragmented MBUF */
2319 if (unlikely(mbuf->nb_segs > 1)) {
2322 /* Encode scatter gather buffer descriptors */
2323 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
2327 bd1->data.nbds = nbds + nb_frags;
2329 txq->nb_tx_avail -= bd1->data.nbds;
2332 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2333 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2334 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2340 /* Write value of prod idx into bd_prod */
2341 txq->tx_db.data.bd_prod = bd_prod;
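/* The compiler barrier below prevents the compiler from reordering the BD
 * and tx_db stores past the relaxed doorbell register write that follows.
 */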
2343 rte_compiler_barrier();
2344 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2347 /* Check again for Tx completions */
2348 qede_process_tx_compl(edev, txq);
2350 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2351 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2357 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2359 struct qede_tx_queue *txq = p_txq;
2360 struct qede_dev *qdev = txq->qdev;
2361 struct ecore_dev *edev = &qdev->edev;
2362 struct rte_mbuf *mbuf;
2363 struct rte_mbuf *m_seg = NULL;
2364 uint16_t nb_tx_pkts;
2368 uint16_t nb_pkt_sent = 0;
2372 __rte_unused bool tunn_flg;
2373 bool tunn_ipv6_ext_flg;
2374 struct eth_tx_1st_bd *bd1;
2375 struct eth_tx_2nd_bd *bd2;
2376 struct eth_tx_3rd_bd *bd3;
2377 uint64_t tx_ol_flags;
2381 uint8_t bd1_bd_flags_bf;
2390 uint8_t tunn_l4_hdr_start_offset;
2391 uint8_t tunn_hdr_size;
2392 uint8_t inner_l2_hdr_size;
2393 uint16_t inner_l4_hdr_offset;
2395 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2396 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2397 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2398 qede_process_tx_compl(edev, txq);
2401 nb_tx_pkts = nb_pkts;
2402 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2403 while (nb_tx_pkts--) {
2404 /* Init flags/values */
2414 bd1_bd_flags_bf = 0;
2419 mplsoudp_flg = false;
2420 tunn_ipv6_ext_flg = false;
2422 tunn_l4_hdr_start_offset = 0;
2427 /* Make sure enough TX BDs are free for this mbuf's segments */
2428 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2431 tx_ol_flags = mbuf->ol_flags;
2432 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2434 /* TX prepare would have already checked supported tunnel Tx
2435 * offloads. Don't rely on pkt_type marked by Rx, instead use
2436 * tx_ol_flags to decide.
2438 tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
2441 /* Check against max which is Tunnel IPv6 + ext */
2442 if (unlikely(txq->nb_tx_avail <
2443 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
2446 /* First indicate it's a tunnel pkt */
2447 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
2448 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2449 /* Legacy FW had flipped behavior with regard to this bit,
2450 * i.e. it needed to be set to prevent FW from touching
2451 * encapsulated packets when it didn't need to.
2453 if (unlikely(txq->is_legacy)) {
2455 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2458 /* Outer IP checksum offload */
2459 if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2460 RTE_MBUF_F_TX_OUTER_IPV4)) {
2462 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
2463 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
2467 * Currently, only inner checksum offload in MPLS-in-UDP
2468 * tunnel with one MPLS label is supported. Both outer
2469 * and inner layer lengths need to be provided in the mbuf. */
2472 if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
2473 RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
2474 mplsoudp_flg = true;
2475 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2476 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
2478 /* Outer L4 offset in two byte words */
2479 tunn_l4_hdr_start_offset =
2480 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
2481 /* Tunnel header size in two byte words */
2482 tunn_hdr_size = (mbuf->outer_l2_len +
2483 mbuf->outer_l3_len +
2484 MPLSINUDP_HDR_SIZE) / 2;
2485 /* Inner L2 header size in two byte words */
2486 inner_l2_hdr_size = (mbuf->l2_len -
2487 MPLSINUDP_HDR_SIZE) / 2;
2488 /* Inner L4 header offset from the beginning
2489 * of inner packet in two byte words
2491 inner_l4_hdr_offset = (mbuf->l2_len -
2492 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
2494 /* Inner L2 size and address type */
2495 bd2_bf1 |= (inner_l2_hdr_size &
2496 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
2497 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
2498 bd2_bf1 |= (UNICAST_ADDRESS &
2499 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
2500 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
2501 /* Treated as IPv6+Ext */
2503 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
2505 /* Mark inner IPv6 if present */
2506 if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
2508 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
2510 /* Inner L4 offsets */
2511 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2512 (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
2513 RTE_MBUF_F_TX_TCP_CKSUM))) {
2514 /* Determines if BD3 is needed */
2515 tunn_ipv6_ext_flg = true;
2516 if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
2517 RTE_MBUF_F_TX_UDP_CKSUM) {
2519 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
2522 /* TODO other pseudo checksum modes are not yet supported */
2526 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
2527 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
2528 bd2_bf2 |= (inner_l4_hdr_offset &
2529 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
2530 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
2532 } /* End MPLSoUDP */
2533 } /* End Tunnel handling */
2535 if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2537 if (unlikely(txq->nb_tx_avail <
2538 ETH_TX_MIN_BDS_PER_LSO_PKT))
2540 /* For LSO, the packet header and payload must reside in
2541 * buffers pointed to by different BDs: BD1 is used for the
2542 * header and BD2 onwards for the data.
2544 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2546 hdr_size += mbuf->outer_l2_len +
2549 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2551 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2552 /* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
2554 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2555 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2556 /* Using one header BD */
2557 bd3_bf |= rte_cpu_to_le_16(1 <<
2558 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2560 if (unlikely(txq->nb_tx_avail <
2561 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2564 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2565 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2568 /* Descriptor based VLAN insertion */
2569 if (tx_ol_flags & RTE_MBUF_F_TX_VLAN) {
2570 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2572 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2575 /* Offload the IP checksum in the hardware */
2576 if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2578 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2579 /* There's no DPDK flag to request outer-L4 csum
2580 * offload. However, for tunnel packets, if inner L3 or L4
2581 * csum offload is requested, we also need to force
2582 * recalculation of the tunnel (outer) L4 header csum.
2584 if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
2585 RTE_MBUF_F_TX_TUNNEL_GRE)) {
2587 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2588 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2592 /* L4 checksum offload (tcp or udp) */
2593 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2594 (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
2596 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2597 /* There's no DPDK flag to request outer-L4 csum
2598 * offload. However, for tunnel packets, if inner L3 or L4
2599 * csum offload is requested, we also need to force
2600 * recalculation of the tunnel (outer) L4 header csum.
2604 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2605 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2609 /* Fill the entry in the SW ring and the BDs in the FW ring */
2611 txq->sw_tx_ring[idx] = mbuf;
2614 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2615 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2618 /* Map MBUF linear data for DMA and set in the BD1 */
2619 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2621 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2622 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2623 bd1->data.vlan = vlan;
2625 if (lso_flg || mplsoudp_flg) {
2626 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2628 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2632 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2635 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2636 rte_mbuf_data_iova(mbuf)),
2637 mbuf->data_len - hdr_size);
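/* Illustrative example (hypothetical LSO packet): with l2_len = 14,
 * l3_len = 20 and l4_len = 20, hdr_size = 54, so BD1 maps only the 54
 * header bytes of the first segment while BD2 maps the remaining
 * data_len - 54 payload bytes starting at offset 54, satisfying the
 * requirement that header and payload sit in separate BDs.
 */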
2638 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2640 bd2->data.bitfields2 =
2641 rte_cpu_to_le_16(bd2_bf2);
2643 bd2->data.tunn_ip_size =
2644 rte_cpu_to_le_16(mbuf->outer_l3_len);
2647 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2648 bd3 = (struct eth_tx_3rd_bd *)
2649 ecore_chain_produce(&txq->tx_pbl);
2650 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2652 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2654 bd3->data.lso_mss = mss;
2656 bd3->data.tunn_l4_hdr_start_offset_w =
2657 tunn_l4_hdr_start_offset;
2658 bd3->data.tunn_hdr_size_w =
2664 /* Handle fragmented MBUF */
2667 /* Encode scatter gather buffer descriptors if required */
2668 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2669 bd1->data.nbds = nbds + nb_frags;
2671 txq->nb_tx_avail -= bd1->data.nbds;
2674 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2675 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2676 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2682 /* Write value of prod idx into bd_prod */
2683 txq->tx_db.data.bd_prod = bd_prod;
2685 rte_compiler_barrier();
2686 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2689 /* Check again for Tx completions */
2690 qede_process_tx_compl(edev, txq);
2692 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2693 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2699 qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2701 struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2702 uint16_t eng0_pkts, eng1_pkts;
2704 eng0_pkts = nb_pkts / 2;
2706 eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
2708 eng1_pkts = nb_pkts - eng0_pkts;
2710 eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
2713 return eng0_pkts + eng1_pkts;
2716 /* This function does a dry-run walk over the completion queue to
2717 * calculate the number of BDs used by HW.
2718 * At the end, it restores the completion queue to its original state.
2721 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2723 uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2724 union eth_rx_cqe *cqe, *orig_cqe = NULL;
2726 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2727 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2729 if (hw_comp_cons == sw_comp_cons)
2732 /* Get the CQE from the completion ring */
2733 cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2736 while (sw_comp_cons != hw_comp_cons) {
2737 switch (cqe->fast_path_regular.type) {
2738 case ETH_RX_CQE_TYPE_REGULAR:
2739 bd_count += cqe->fast_path_regular.bd_num;
2741 case ETH_RX_CQE_TYPE_TPA_END:
2742 bd_count += cqe->fast_path_tpa_end.num_of_bds;
2749 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2750 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2753 /* revert comp_ring to original state */
2754 ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
2760 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2762 uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2763 uint16_t produced, consumed;
2764 struct qede_rx_queue *rxq = p_rxq;
2766 if (offset > rxq->nb_rx_desc)
2769 sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2770 sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2772 /* find BDs used by HW from completion queue elements */
2773 hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
2775 if (hw_bd_cons < sw_bd_cons)
2776 /* wraparound case */
2777 consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2779 consumed = hw_bd_cons - sw_bd_cons;
2781 if (offset <= consumed)
2782 return RTE_ETH_RX_DESC_DONE;
2784 if (sw_bd_prod < sw_bd_cons)
2785 /* wraparound case */
2786 produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2788 produced = sw_bd_prod - sw_bd_cons;
2790 if (offset <= produced)
2791 return RTE_ETH_RX_DESC_AVAIL;
2793 return RTE_ETH_RX_DESC_UNAVAIL;
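/* Illustrative example (hypothetical ring state, no wraparound): if the CQE
 * walk reports 3 BDs consumed by HW and 10 BDs are currently posted
 * (sw_bd_prod - sw_bd_cons = 10), then offsets 0..3 report
 * RTE_ETH_RX_DESC_DONE, offsets 4..10 report RTE_ETH_RX_DESC_AVAIL, and
 * larger offsets (up to nb_rx_desc) report RTE_ETH_RX_DESC_UNAVAIL.
 */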