1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
10 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
12 struct rte_mbuf *new_mb = NULL;
13 struct eth_rx_bd *rx_bd;
15 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
17 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
18 if (unlikely(!new_mb)) {
20 "Failed to allocate rx buffer "
21 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
22 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
23 rte_mempool_avail_count(rxq->mb_pool),
24 rte_mempool_in_use_count(rxq->mb_pool));
27 rxq->sw_rx_ring[idx].mbuf = new_mb;
28 rxq->sw_rx_ring[idx].page_offset = 0;
29 mapping = rte_mbuf_data_iova_default(new_mb);
30 /* Advance PROD and get BD pointer */
31 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
32 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
33 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
38 #define QEDE_MAX_BULK_ALLOC_COUNT 512
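/* Cap on the number of mbufs requested from the mempool in one bulk call;
 * qede_alloc_rx_bulk_mbufs() below clamps larger requests to this value so
 * that the on-stack obj_p[] array is never overrun.
 */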
40 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
42 void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
43 struct rte_mbuf *mbuf = NULL;
44 struct eth_rx_bd *rx_bd;
49 if (count > QEDE_MAX_BULK_ALLOC_COUNT)
50 count = QEDE_MAX_BULK_ALLOC_COUNT;
52 ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
55 "Failed to allocate %d rx buffers "
56 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
58 rxq->sw_rx_prod & NUM_RX_BDS(rxq),
59 rxq->sw_rx_cons & NUM_RX_BDS(rxq),
60 rte_mempool_avail_count(rxq->mb_pool),
61 rte_mempool_in_use_count(rxq->mb_pool));
65 for (i = 0; i < count; i++) {
67 if (likely(i < count - 1))
68 rte_prefetch0(obj_p[i + 1]);
70 idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
71 rxq->sw_rx_ring[idx].mbuf = mbuf;
72 rxq->sw_rx_ring[idx].page_offset = 0;
73 mapping = rte_mbuf_data_iova_default(mbuf);
74 rx_bd = (struct eth_rx_bd *)
75 ecore_chain_produce(&rxq->rx_bd_ring);
76 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
77 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
84 /* Criteria for calculating Rx buffer size -
85 * 1) rx_buf_size should not exceed the size of mbuf
86 * 2) In scattered_rx mode - minimum rx_buf_size should be
87 * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
88 * 3) In regular mode - minimum rx_buf_size should be
89 * (MTU + Maximum L2 Header Size + 2)
90 * In the above cases, +2 corresponds to 2 bytes of padding in front of the L2
92 * 4) rx_buf_size should be cacheline-size aligned. So considering
93 * criterion 1, we round the size down (floor) instead of up (ceil),
94 * so that rx_buf_size never exceeds the mbuf size after alignment.
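 * Illustrative example (hypothetical numbers, not taken from this file):
 * with max_frame_size = 9018, QEDE_ETH_OVERHEAD of ~24 bytes and
 * ETH_RX_MAX_BUFF_PER_PKT of 5, the scattered-mode minimum would be about
 * (9018 + 24) / 5 = 1808 bytes, which criterion 4 then floors to a
 * cache-line multiple (1792 for a 64-byte cache line).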
97 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
98 uint16_t max_frame_size)
100 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
104 if (dev->data->scattered_rx) {
105 /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers can be
106 * used for a single packet, so we need to make sure the mbuf size
107 * is sufficient for this.
109 if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
110 (max_frame_size + QEDE_ETH_OVERHEAD)) {
111 DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
112 mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
116 rx_buf_size = RTE_MAX(mbufsz,
117 (max_frame_size + QEDE_ETH_OVERHEAD) /
118 ETH_RX_MAX_BUFF_PER_PKT);
120 rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
123 /* Align to cache-line size if needed */
124 return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
127 static struct qede_rx_queue *
128 qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
131 unsigned int socket_id,
132 struct rte_mempool *mp,
135 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
136 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
137 struct qede_rx_queue *rxq;
141 /* First allocate the rx queue data structure */
142 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
143 RTE_CACHE_LINE_SIZE, socket_id);
146 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
153 rxq->nb_rx_desc = nb_desc;
154 rxq->queue_id = queue_idx;
155 rxq->port_id = dev->data->port_id;
158 rxq->rx_buf_size = bufsz;
160 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
161 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
163 /* Allocate the parallel driver ring for Rx buffers */
164 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
165 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
166 RTE_CACHE_LINE_SIZE, socket_id);
167 if (!rxq->sw_rx_ring) {
168 DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
169 " socket %u\n", socket_id);
174 /* Allocate FW Rx ring */
175 rc = qdev->ops->common->chain_alloc(edev,
176 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
177 ECORE_CHAIN_MODE_NEXT_PTR,
178 ECORE_CHAIN_CNT_TYPE_U16,
180 sizeof(struct eth_rx_bd),
184 if (rc != ECORE_SUCCESS) {
185 DP_ERR(edev, "Memory allocation fails for RX BD ring"
186 " on socket %u\n", socket_id);
187 rte_free(rxq->sw_rx_ring);
192 /* Allocate FW completion ring */
193 rc = qdev->ops->common->chain_alloc(edev,
194 ECORE_CHAIN_USE_TO_CONSUME,
195 ECORE_CHAIN_MODE_PBL,
196 ECORE_CHAIN_CNT_TYPE_U16,
198 sizeof(union eth_rx_cqe),
202 if (rc != ECORE_SUCCESS) {
203 DP_ERR(edev, "Memory allocation fails for RX CQE ring"
204 " on socket %u\n", socket_id);
205 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
206 rte_free(rxq->sw_rx_ring);
215 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
216 uint16_t nb_desc, unsigned int socket_id,
217 __rte_unused const struct rte_eth_rxconf *rx_conf,
218 struct rte_mempool *mp)
220 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
221 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
222 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
223 struct qede_rx_queue *rxq;
224 uint16_t max_rx_pkt_len;
228 PMD_INIT_FUNC_TRACE(edev);
230 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
231 if (!rte_is_power_of_2(nb_desc)) {
232 DP_ERR(edev, "Ring size %u is not power of 2\n",
237 /* Free memory prior to re-allocation if needed... */
238 if (dev->data->rx_queues[qid] != NULL) {
239 qede_rx_queue_release(dev->data->rx_queues[qid]);
240 dev->data->rx_queues[qid] = NULL;
243 max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
245 /* Fix up RX buffer size */
246 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
247 /* Cache-align the mbuf size to simplify the rx_buf_size calculation */
248 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
249 if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
250 (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
251 if (!dev->data->scattered_rx) {
252 DP_INFO(edev, "Forcing scatter-gather mode\n");
253 dev->data->scattered_rx = 1;
257 rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
263 if (ECORE_IS_CMT(edev)) {
264 rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
265 socket_id, mp, bufsz);
269 qdev->fp_array[qid * 2].rxq = rxq;
270 rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
271 socket_id, mp, bufsz);
275 qdev->fp_array[qid * 2 + 1].rxq = rxq;
276 /* provide per engine fp struct as rx queue */
277 dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
279 rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
280 socket_id, mp, bufsz);
284 dev->data->rx_queues[qid] = rxq;
285 qdev->fp_array[qid].rxq = rxq;
288 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
289 qid, nb_desc, rxq->rx_buf_size, socket_id);
295 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
296 struct qede_rx_queue *rxq)
298 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
299 ecore_chain_reset(&rxq->rx_bd_ring);
300 ecore_chain_reset(&rxq->rx_comp_ring);
303 *rxq->hw_cons_ptr = 0;
306 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
310 if (rxq->sw_rx_ring) {
311 for (i = 0; i < rxq->nb_rx_desc; i++) {
312 if (rxq->sw_rx_ring[i].mbuf) {
313 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
314 rxq->sw_rx_ring[i].mbuf = NULL;
320 static void _qede_rx_queue_release(struct qede_dev *qdev,
321 struct ecore_dev *edev,
322 struct qede_rx_queue *rxq)
324 qede_rx_queue_release_mbufs(rxq);
325 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
326 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
327 rte_free(rxq->sw_rx_ring);
331 void qede_rx_queue_release(void *rx_queue)
333 struct qede_rx_queue *rxq = rx_queue;
334 struct qede_fastpath_cmt *fp_cmt;
335 struct qede_dev *qdev;
336 struct ecore_dev *edev;
340 edev = QEDE_INIT_EDEV(qdev);
341 PMD_INIT_FUNC_TRACE(edev);
342 if (ECORE_IS_CMT(edev)) {
344 _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
345 _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
347 _qede_rx_queue_release(qdev, edev, rxq);
352 /* Stops a given RX queue in the HW */
353 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
355 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
356 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
357 struct ecore_hwfn *p_hwfn;
358 struct qede_rx_queue *rxq;
362 if (rx_queue_id < qdev->num_rx_queues) {
363 rxq = qdev->fp_array[rx_queue_id].rxq;
364 hwfn_index = rx_queue_id % edev->num_hwfns;
365 p_hwfn = &edev->hwfns[hwfn_index];
366 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
368 if (rc != ECORE_SUCCESS) {
369 DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
372 qede_rx_queue_release_mbufs(rxq);
373 qede_rx_queue_reset(qdev, rxq);
374 eth_dev->data->rx_queue_state[rx_queue_id] =
375 RTE_ETH_QUEUE_STATE_STOPPED;
376 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
378 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
385 static struct qede_tx_queue *
386 qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
389 unsigned int socket_id,
390 const struct rte_eth_txconf *tx_conf)
392 struct qede_dev *qdev = dev->data->dev_private;
393 struct ecore_dev *edev = &qdev->edev;
394 struct qede_tx_queue *txq;
396 size_t sw_tx_ring_size;
398 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
399 RTE_CACHE_LINE_SIZE, socket_id);
403 "Unable to allocate memory for txq on socket %u",
408 txq->nb_tx_desc = nb_desc;
410 txq->port_id = dev->data->port_id;
412 rc = qdev->ops->common->chain_alloc(edev,
413 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
414 ECORE_CHAIN_MODE_PBL,
415 ECORE_CHAIN_CNT_TYPE_U16,
417 sizeof(union eth_tx_bd_types),
420 if (rc != ECORE_SUCCESS) {
422 "Unable to allocate memory for txbd ring on socket %u",
424 qede_tx_queue_release(txq);
428 /* Allocate software ring */
429 sw_tx_ring_size = sizeof(txq->sw_tx_ring) * txq->nb_tx_desc;
430 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
432 RTE_CACHE_LINE_SIZE, socket_id);
434 if (!txq->sw_tx_ring) {
436 "Unable to allocate memory for txbd ring on socket %u",
438 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
439 qede_tx_queue_release(txq);
443 txq->queue_id = queue_idx;
445 txq->nb_tx_avail = txq->nb_tx_desc;
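/* If the application left tx_conf->tx_free_thresh at 0, fall back to a
 * driver default of (nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH). The
 * threshold controls when the transmit path reaps TX completions.
 */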
447 txq->tx_free_thresh =
448 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
449 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
452 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
453 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
458 qede_tx_queue_setup(struct rte_eth_dev *dev,
461 unsigned int socket_id,
462 const struct rte_eth_txconf *tx_conf)
464 struct qede_dev *qdev = dev->data->dev_private;
465 struct ecore_dev *edev = &qdev->edev;
466 struct qede_tx_queue *txq;
468 PMD_INIT_FUNC_TRACE(edev);
470 if (!rte_is_power_of_2(nb_desc)) {
471 DP_ERR(edev, "Ring size %u is not power of 2\n",
476 /* Free memory prior to re-allocation if needed... */
477 if (dev->data->tx_queues[queue_idx] != NULL) {
478 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
479 dev->data->tx_queues[queue_idx] = NULL;
482 if (ECORE_IS_CMT(edev)) {
483 txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
488 qdev->fp_array[queue_idx * 2].txq = txq;
489 txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
494 qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
495 dev->data->tx_queues[queue_idx] =
496 &qdev->fp_array_cmt[queue_idx];
498 txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
503 dev->data->tx_queues[queue_idx] = txq;
504 qdev->fp_array[queue_idx].txq = txq;
511 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
512 struct qede_tx_queue *txq)
514 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
515 ecore_chain_reset(&txq->tx_pbl);
518 *txq->hw_cons_ptr = 0;
521 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
525 if (txq->sw_tx_ring) {
526 for (i = 0; i < txq->nb_tx_desc; i++) {
527 if (txq->sw_tx_ring[i]) {
528 rte_pktmbuf_free(txq->sw_tx_ring[i]);
529 txq->sw_tx_ring[i] = NULL;
535 static void _qede_tx_queue_release(struct qede_dev *qdev,
536 struct ecore_dev *edev,
537 struct qede_tx_queue *txq)
539 qede_tx_queue_release_mbufs(txq);
540 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
541 rte_free(txq->sw_tx_ring);
545 void qede_tx_queue_release(void *tx_queue)
547 struct qede_tx_queue *txq = tx_queue;
548 struct qede_fastpath_cmt *fp_cmt;
549 struct qede_dev *qdev;
550 struct ecore_dev *edev;
554 edev = QEDE_INIT_EDEV(qdev);
555 PMD_INIT_FUNC_TRACE(edev);
557 if (ECORE_IS_CMT(edev)) {
559 _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
560 _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
562 _qede_tx_queue_release(qdev, edev, txq);
567 /* This function allocates fast-path status block memory */
569 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
572 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
573 struct status_block *sb_virt;
577 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
578 sizeof(struct status_block));
580 DP_ERR(edev, "Status block allocation failed\n");
583 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
586 DP_ERR(edev, "Status block initialization failed\n");
587 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
588 sizeof(struct status_block));
595 int qede_alloc_fp_resc(struct qede_dev *qdev)
597 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
598 struct qede_fastpath *fp;
603 PMD_INIT_FUNC_TRACE(edev);
606 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
608 num_sbs = ecore_cxt_get_proto_cid_count
609 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
612 DP_ERR(edev, "No status blocks available\n");
616 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
617 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
619 if (!qdev->fp_array) {
620 DP_ERR(edev, "fp array allocation failed\n");
624 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
625 sizeof(*qdev->fp_array));
627 if (ECORE_IS_CMT(edev)) {
628 qdev->fp_array_cmt = rte_calloc("fp_cmt",
629 QEDE_RXTX_MAX(qdev) / 2,
630 sizeof(*qdev->fp_array_cmt),
631 RTE_CACHE_LINE_SIZE);
633 if (!qdev->fp_array_cmt) {
634 DP_ERR(edev, "fp array for CMT allocation failed\n");
638 memset((void *)qdev->fp_array_cmt, 0,
639 (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
641 /* Establish the mapping of fp_array with fp_array_cmt */
642 for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
643 qdev->fp_array_cmt[i].qdev = qdev;
644 qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
645 qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
649 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
650 fp = &qdev->fp_array[sb_idx];
651 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
652 RTE_CACHE_LINE_SIZE);
654 DP_ERR(edev, "FP sb_info allocation fails\n");
657 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
658 DP_ERR(edev, "FP status block allocation fails\n");
661 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
662 fp->sb_info->igu_sb_id);
668 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
670 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
671 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
672 struct qede_fastpath *fp;
676 PMD_INIT_FUNC_TRACE(edev);
678 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
679 fp = &qdev->fp_array[sb_idx];
681 DP_INFO(edev, "Free sb_info index 0x%x\n",
682 fp->sb_info->igu_sb_id);
683 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
684 fp->sb_info->sb_phys,
685 sizeof(struct status_block));
686 rte_free(fp->sb_info);
691 /* Free packet buffers and ring memories */
692 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
693 if (eth_dev->data->rx_queues[i]) {
694 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
695 eth_dev->data->rx_queues[i] = NULL;
699 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
700 if (eth_dev->data->tx_queues[i]) {
701 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
702 eth_dev->data->tx_queues[i] = NULL;
707 rte_free(qdev->fp_array);
708 qdev->fp_array = NULL;
710 if (qdev->fp_array_cmt)
711 rte_free(qdev->fp_array_cmt);
712 qdev->fp_array_cmt = NULL;
716 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
717 struct qede_rx_queue *rxq)
719 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
720 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
721 struct eth_rx_prod_data rx_prods = { 0 };
723 /* Update producers */
724 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
725 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
727 /* Make sure that the BD and SGE data is updated before updating the
728 * producers since FW might read the BD/SGE right after the producer
733 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
734 (uint32_t *)&rx_prods);
736 /* mmiowb is needed to synchronize doorbell writes from more than one
737 * processor. It guarantees that the write arrives to the device before
738 * the napi lock is released and another qede_poll is called (possibly
739 * on another CPU). Without this barrier, the next doorbell can bypass
740 * this doorbell. This is applicable to IA64/Altix systems.
744 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
747 /* Starts a given RX queue in HW */
749 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
751 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
752 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
753 struct ecore_queue_start_common_params params;
754 struct ecore_rxq_start_ret_params ret_params;
755 struct qede_rx_queue *rxq;
756 struct qede_fastpath *fp;
757 struct ecore_hwfn *p_hwfn;
758 dma_addr_t p_phys_table;
764 if (rx_queue_id < qdev->num_rx_queues) {
765 fp = &qdev->fp_array[rx_queue_id];
767 /* Allocate buffers for the Rx ring */
768 for (j = 0; j < rxq->nb_rx_desc; j++) {
769 rc = qede_alloc_rx_buffer(rxq);
771 DP_ERR(edev, "RX buffer allocation failed"
772 " for rxq = %u\n", rx_queue_id);
776 /* disable interrupts */
777 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
779 memset(¶ms, 0, sizeof(params));
780 params.queue_id = rx_queue_id / edev->num_hwfns;
782 params.stats_id = params.vport_id;
783 params.p_sb = fp->sb_info;
784 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
785 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
786 params.sb_idx = RX_PI;
787 hwfn_index = rx_queue_id % edev->num_hwfns;
788 p_hwfn = &edev->hwfns[hwfn_index];
789 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
790 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
791 memset(&ret_params, 0, sizeof(ret_params));
792 rc = ecore_eth_rx_queue_start(p_hwfn,
793 p_hwfn->hw_info.opaque_fid,
794 ¶ms, fp->rxq->rx_buf_size,
795 fp->rxq->rx_bd_ring.p_phys_addr,
796 p_phys_table, page_cnt,
799 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
803 /* Update with the returned parameters */
804 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
805 fp->rxq->handle = ret_params.p_handle;
807 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
808 qede_update_rx_prod(qdev, fp->rxq);
809 eth_dev->data->rx_queue_state[rx_queue_id] =
810 RTE_ETH_QUEUE_STATE_STARTED;
811 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
813 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
821 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
823 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
824 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
825 struct ecore_queue_start_common_params params;
826 struct ecore_txq_start_ret_params ret_params;
827 struct ecore_hwfn *p_hwfn;
828 dma_addr_t p_phys_table;
829 struct qede_tx_queue *txq;
830 struct qede_fastpath *fp;
835 if (tx_queue_id < qdev->num_tx_queues) {
836 fp = &qdev->fp_array[tx_queue_id];
838 memset(¶ms, 0, sizeof(params));
839 params.queue_id = tx_queue_id / edev->num_hwfns;
841 params.stats_id = params.vport_id;
842 params.p_sb = fp->sb_info;
843 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
844 fp->txq->queue_id, fp->sb_info->igu_sb_id);
845 params.sb_idx = TX_PI(0); /* tc = 0 */
846 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
847 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
848 hwfn_index = tx_queue_id % edev->num_hwfns;
849 p_hwfn = &edev->hwfns[hwfn_index];
850 if (qdev->dev_info.is_legacy)
851 fp->txq->is_legacy = true;
852 rc = ecore_eth_tx_queue_start(p_hwfn,
853 p_hwfn->hw_info.opaque_fid,
855 p_phys_table, page_cnt,
857 if (rc != ECORE_SUCCESS) {
858 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
862 txq->doorbell_addr = ret_params.p_doorbell;
863 txq->handle = ret_params.p_handle;
865 txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
866 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
868 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
870 SET_FIELD(txq->tx_db.data.params,
871 ETH_DB_DATA_AGG_VAL_SEL,
872 DQ_XCM_ETH_TX_BD_PROD_CMD);
873 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
874 eth_dev->data->tx_queue_state[tx_queue_id] =
875 RTE_ETH_QUEUE_STATE_STARTED;
876 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
878 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
885 static inline uint16_t
886 qede_free_tx_pkt(struct qede_tx_queue *txq)
888 struct rte_mbuf *mbuf;
894 mbuf = txq->sw_tx_ring[idx];
896 nb_segs = mbuf->nb_segs;
897 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
901 /* It's like consuming rxbuf in recv() */
902 ecore_chain_consume(&txq->tx_pbl);
906 rte_pktmbuf_free(mbuf);
907 txq->sw_tx_ring[idx] = NULL;
909 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
911 ecore_chain_consume(&txq->tx_pbl);
918 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
919 struct qede_tx_queue *txq)
925 rte_compiler_barrier();
926 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
927 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
928 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
929 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
930 abs(hw_bd_cons - sw_tx_cons));
933 remaining = hw_bd_cons - sw_tx_cons;
934 txq->nb_tx_avail += remaining;
937 remaining -= qede_free_tx_pkt(txq);
940 static int qede_drain_txq(struct qede_dev *qdev,
941 struct qede_tx_queue *txq, bool allow_drain)
943 struct ecore_dev *edev = &qdev->edev;
946 while (txq->sw_tx_cons != txq->sw_tx_prod) {
947 qede_process_tx_compl(edev, txq);
950 DP_ERR(edev, "Tx queue[%u] is stuck, "
951 "requesting MCP to drain\n",
953 rc = qdev->ops->common->drain(edev);
956 return qede_drain_txq(qdev, txq, false);
958 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
959 "PROD=%d, CONS=%d\n",
960 txq->queue_id, txq->sw_tx_prod,
966 rte_compiler_barrier();
969 /* FW finished processing, wait for HW to transmit all tx packets */
975 /* Stops a given TX queue in the HW */
976 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
978 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
979 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
980 struct ecore_hwfn *p_hwfn;
981 struct qede_tx_queue *txq;
985 if (tx_queue_id < qdev->num_tx_queues) {
986 txq = qdev->fp_array[tx_queue_id].txq;
988 if (qede_drain_txq(qdev, txq, true))
989 return -1; /* For the lack of retcodes */
991 hwfn_index = tx_queue_id % edev->num_hwfns;
992 p_hwfn = &edev->hwfns[hwfn_index];
993 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
994 if (rc != ECORE_SUCCESS) {
995 DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
998 qede_tx_queue_release_mbufs(txq);
999 qede_tx_queue_reset(qdev, txq);
1000 eth_dev->data->tx_queue_state[tx_queue_id] =
1001 RTE_ETH_QUEUE_STATE_STOPPED;
1002 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
1004 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
1011 int qede_start_queues(struct rte_eth_dev *eth_dev)
1013 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1017 for (id = 0; id < qdev->num_rx_queues; id++) {
1018 rc = qede_rx_queue_start(eth_dev, id);
1019 if (rc != ECORE_SUCCESS)
1023 for (id = 0; id < qdev->num_tx_queues; id++) {
1024 rc = qede_tx_queue_start(eth_dev, id);
1025 if (rc != ECORE_SUCCESS)
1032 void qede_stop_queues(struct rte_eth_dev *eth_dev)
1034 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1037 /* Stopping RX/TX queues */
1038 for (id = 0; id < qdev->num_tx_queues; id++)
1039 qede_tx_queue_stop(eth_dev, id);
1041 for (id = 0; id < qdev->num_rx_queues; id++)
1042 qede_rx_queue_stop(eth_dev, id);
1045 static inline bool qede_tunn_exist(uint16_t flag)
1047 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
1048 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
1051 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
1053 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
1054 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
1058 * qede_check_tunn_csum_l4:
1060 * 1 : If L4 csum is enabled AND if the validation has failed.
1063 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
1065 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
1066 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
1067 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
1068 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
1073 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
1075 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1076 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
1077 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1078 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
1083 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
1084 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
1086 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
1087 struct rte_ether_hdr *eth_hdr;
1088 struct rte_ipv4_hdr *ipv4_hdr;
1089 struct rte_ipv6_hdr *ipv6_hdr;
1090 struct rte_vlan_hdr *vlan_hdr;
1092 bool vlan_tagged = 0;
1095 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1096 len = sizeof(struct rte_ether_hdr);
1097 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
1099 /* Note: Valid only if VLAN stripping is disabled */
1100 if (ethertype == RTE_ETHER_TYPE_VLAN) {
1102 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
1103 len += sizeof(struct rte_vlan_hdr);
1104 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
1107 if (ethertype == RTE_ETHER_TYPE_IPV4) {
1108 packet_type |= RTE_PTYPE_L3_IPV4;
1109 ipv4_hdr = rte_pktmbuf_mtod_offset(m,
1110 struct rte_ipv4_hdr *, len);
1111 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
1112 packet_type |= RTE_PTYPE_L4_TCP;
1113 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
1114 packet_type |= RTE_PTYPE_L4_UDP;
1115 } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
1116 packet_type |= RTE_PTYPE_L3_IPV6;
1117 ipv6_hdr = rte_pktmbuf_mtod_offset(m,
1118 struct rte_ipv6_hdr *, len);
1119 if (ipv6_hdr->proto == IPPROTO_TCP)
1120 packet_type |= RTE_PTYPE_L4_TCP;
1121 else if (ipv6_hdr->proto == IPPROTO_UDP)
1122 packet_type |= RTE_PTYPE_L4_UDP;
1126 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1128 packet_type |= RTE_PTYPE_L2_ETHER;
1133 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
1138 static const uint32_t
1139 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1140 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
1141 RTE_PTYPE_INNER_L2_ETHER,
1142 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
1143 RTE_PTYPE_INNER_L2_ETHER,
1144 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
1145 RTE_PTYPE_INNER_L4_TCP |
1146 RTE_PTYPE_INNER_L2_ETHER,
1147 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
1148 RTE_PTYPE_INNER_L4_TCP |
1149 RTE_PTYPE_INNER_L2_ETHER,
1150 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
1151 RTE_PTYPE_INNER_L4_UDP |
1152 RTE_PTYPE_INNER_L2_ETHER,
1153 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
1154 RTE_PTYPE_INNER_L4_UDP |
1155 RTE_PTYPE_INNER_L2_ETHER,
1156 /* Frags with no VLAN */
1157 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1158 RTE_PTYPE_INNER_L4_FRAG |
1159 RTE_PTYPE_INNER_L2_ETHER,
1160 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1161 RTE_PTYPE_INNER_L4_FRAG |
1162 RTE_PTYPE_INNER_L2_ETHER,
1164 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1165 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1166 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1167 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1168 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1169 RTE_PTYPE_INNER_L4_TCP |
1170 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1171 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1172 RTE_PTYPE_INNER_L4_TCP |
1173 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1174 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
1175 RTE_PTYPE_INNER_L4_UDP |
1176 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1177 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
1178 RTE_PTYPE_INNER_L4_UDP |
1179 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1180 /* Frags with VLAN */
1181 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1182 RTE_PTYPE_INNER_L4_FRAG |
1183 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1184 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1185 RTE_PTYPE_INNER_L4_FRAG |
1186 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1189 /* Bits (0..3) provide the L3/L4 protocol type */
1190 /* Bits (4,5) provide the frag and VLAN info */
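/* Example: an inner IPv4/TCP frame carrying an 802.1Q tag sets the IPv4 L3
 * type, the TCP L4 protocol and the TAG8021QEXIST bit, so val selects the
 * QEDE_PKT_TYPE_IPV4_TCP_VLAN entry of the table above (illustration only;
 * the exact bit encoding comes from the FW parsing-flags definitions).
 */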
1191 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1192 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1193 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1194 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1195 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1196 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1197 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1198 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1200 if (val < QEDE_PKT_TYPE_MAX)
1201 return ptype_lkup_tbl[val];
1203 return RTE_PTYPE_UNKNOWN;
1206 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1211 static const uint32_t
1212 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1213 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1214 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1215 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
1218 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
1221 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
1224 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1227 /* Frags with no VLAN */
1228 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1231 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1235 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1236 RTE_PTYPE_L2_ETHER_VLAN,
1237 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1238 RTE_PTYPE_L2_ETHER_VLAN,
1239 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1241 RTE_PTYPE_L2_ETHER_VLAN,
1242 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1244 RTE_PTYPE_L2_ETHER_VLAN,
1245 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1247 RTE_PTYPE_L2_ETHER_VLAN,
1248 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1250 RTE_PTYPE_L2_ETHER_VLAN,
1251 /* Frags with VLAN */
1252 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1254 RTE_PTYPE_L2_ETHER_VLAN,
1255 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1257 RTE_PTYPE_L2_ETHER_VLAN,
1260 /* Bits (0..3) provide the L3/L4 protocol type */
1261 /* Bits (4,5) provide the frag and VLAN info */
1262 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1263 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1264 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1265 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1266 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1267 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1268 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1269 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1271 if (val < QEDE_PKT_TYPE_MAX)
1272 return ptype_lkup_tbl[val];
1274 return RTE_PTYPE_UNKNOWN;
1277 static inline uint8_t
1278 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1280 struct rte_ipv4_hdr *ip;
1285 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1286 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1288 if (unlikely(val)) {
1289 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1290 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1291 ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
1292 sizeof(struct rte_ether_hdr));
1293 pkt_csum = ip->hdr_checksum;
1294 ip->hdr_checksum = 0;
1295 calc_csum = rte_ipv4_cksum(ip);
1296 ip->hdr_checksum = pkt_csum;
1297 return (calc_csum != pkt_csum);
1298 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1305 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1307 ecore_chain_consume(&rxq->rx_bd_ring);
1312 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1313 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
1315 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1316 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
1317 struct qede_rx_entry *curr_prod;
1318 dma_addr_t new_mapping;
1320 curr_prod = &rxq->sw_rx_ring[idx];
1321 *curr_prod = *curr_cons;
1323 new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
1324 curr_prod->page_offset;
1326 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1327 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1333 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1334 struct qede_dev *qdev, uint8_t count)
1336 struct qede_rx_entry *curr_cons;
1338 for (; count > 0; count--) {
1339 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1340 qede_reuse_page(qdev, rxq, curr_cons);
1341 qede_rx_bd_ring_consume(rxq);
1346 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1347 struct qede_rx_queue *rxq,
1348 uint8_t agg_index, uint16_t len)
1350 struct qede_agg_info *tpa_info;
1351 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1354 /* Under certain conditions the FW may not consume an additional or new
1355 * BD, so the decision to consume the BD must be made based on
1356 * len_list[0].
1358 if (rte_le_to_cpu_16(len)) {
1359 tpa_info = &rxq->tpa_info[agg_index];
1360 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1361 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
1363 curr_frag->nb_segs = 1;
1364 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1365 curr_frag->data_len = curr_frag->pkt_len;
1366 tpa_info->tpa_tail->next = curr_frag;
1367 tpa_info->tpa_tail = curr_frag;
1368 qede_rx_bd_ring_consume(rxq);
1369 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1370 PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
1371 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1372 rxq->rx_alloc_errors++;
1378 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1379 struct qede_rx_queue *rxq,
1380 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1382 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1383 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1384 /* only len_list[0] will have value */
1385 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1390 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1391 struct qede_rx_queue *rxq,
1392 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1394 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1396 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1398 /* Update total length and frags based on end TPA */
1399 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1400 /* TODO: Add Sanity Checks */
1401 rx_mb->nb_segs = cqe->num_of_bds;
1402 rx_mb->pkt_len = cqe->total_packet_len;
1404 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1405 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1406 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1410 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1415 static const uint32_t
1416 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1417 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1418 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1419 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1420 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1421 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1422 RTE_PTYPE_TUNNEL_GENEVE,
1423 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1424 RTE_PTYPE_TUNNEL_GRE,
1425 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1426 RTE_PTYPE_TUNNEL_VXLAN,
1427 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1428 RTE_PTYPE_TUNNEL_GENEVE,
1429 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1430 RTE_PTYPE_TUNNEL_GRE,
1431 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1432 RTE_PTYPE_TUNNEL_VXLAN,
1433 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1434 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1435 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1436 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1437 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1438 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1439 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1440 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1441 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1442 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1443 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1444 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1445 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1446 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1447 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1448 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1449 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1450 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1451 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1452 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1453 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1454 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1455 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1456 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1459 /* Cover bits[4-0] to include tunn_type and next protocol */
1460 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1461 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1462 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1463 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1465 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1466 return ptype_tunn_lkup_tbl[val];
1468 return RTE_PTYPE_UNKNOWN;
1472 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1473 uint8_t num_segs, uint16_t pkt_len)
1475 struct qede_rx_queue *rxq = p_rxq;
1476 struct qede_dev *qdev = rxq->qdev;
1477 register struct rte_mbuf *seg1 = NULL;
1478 register struct rte_mbuf *seg2 = NULL;
1479 uint16_t sw_rx_index;
1484 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1486 if (unlikely(!cur_size)) {
1487 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1488 " left for mapping jumbo\n", num_segs);
1489 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1492 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1493 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1494 qede_rx_bd_ring_consume(rxq);
1495 pkt_len -= cur_size;
1496 seg2->data_len = cur_size;
1506 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1508 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1511 PMD_RX_LOG(INFO, rxq,
1512 "len 0x%04x bf 0x%04x hash_val 0x%x"
1513 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1514 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1515 m->data_len, bitfield, m->hash.rss,
1516 (unsigned long)m->ol_flags,
1517 rte_get_ptype_l2_name(m->packet_type),
1518 rte_get_ptype_l3_name(m->packet_type),
1519 rte_get_ptype_l4_name(m->packet_type),
1520 rte_get_ptype_tunnel_name(m->packet_type),
1521 rte_get_ptype_inner_l2_name(m->packet_type),
1522 rte_get_ptype_inner_l3_name(m->packet_type),
1523 rte_get_ptype_inner_l4_name(m->packet_type));
1528 qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1530 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1531 register struct rte_mbuf *rx_mb = NULL;
1532 struct qede_rx_queue *rxq = p_rxq;
1533 struct qede_dev *qdev = rxq->qdev;
1534 struct ecore_dev *edev = &qdev->edev;
1535 union eth_rx_cqe *cqe;
1537 enum eth_rx_cqe_type cqe_type;
1538 int rss_enable = qdev->rss_enable;
1539 int rx_alloc_count = 0;
1540 uint32_t packet_type;
1542 uint16_t vlan_tci, port_id;
1543 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
1544 uint16_t rx_pkt = 0;
1545 uint16_t pkt_len = 0;
1546 uint16_t len; /* Length of first BD */
1547 uint16_t preload_idx;
1548 uint16_t parse_flag;
1549 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1550 uint8_t bitfield_val;
1552 uint8_t offset, flags, bd_num;
1555 /* Allocate buffers that we used in previous loop */
1556 if (rxq->rx_alloc_count) {
1557 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1558 rxq->rx_alloc_count))) {
1559 struct rte_eth_dev *dev;
1561 PMD_RX_LOG(ERR, rxq,
1562 "New buffer allocation failed,"
1563 "dropping incoming packetn");
1564 dev = &rte_eth_devices[rxq->port_id];
1565 dev->data->rx_mbuf_alloc_failed +=
1566 rxq->rx_alloc_count;
1567 rxq->rx_alloc_errors += rxq->rx_alloc_count;
1570 qede_update_rx_prod(qdev, rxq);
1571 rxq->rx_alloc_count = 0;
1574 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1575 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1579 if (hw_comp_cons == sw_comp_cons)
1582 num_rx_bds = NUM_RX_BDS(rxq);
1583 port_id = rxq->port_id;
1585 while (sw_comp_cons != hw_comp_cons) {
1587 packet_type = RTE_PTYPE_UNKNOWN;
1591 /* Get the CQE from the completion ring */
1593 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1594 cqe_type = cqe->fast_path_regular.type;
1595 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1597 if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
1598 fp_cqe = &cqe->fast_path_regular;
1600 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
1601 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1602 ecore_eth_cqe_completion
1603 (&edev->hwfns[rxq->queue_id %
1605 (struct eth_slow_path_rx_cqe *)cqe);
1610 /* Get the data from the SW ring */
1611 sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
1612 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1613 assert(rx_mb != NULL);
1615 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1616 offset = fp_cqe->placement_offset;
1617 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1618 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1619 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1620 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1621 bd_num = fp_cqe->bd_num;
1622 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1623 bitfield_val = fp_cqe->bitfields;
1626 if (unlikely(qede_tunn_exist(parse_flag))) {
1627 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1628 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1629 PMD_RX_LOG(ERR, rxq,
1630 "L4 csum failed, flags = 0x%x\n",
1632 rxq->rx_hw_errors++;
1633 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1635 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1638 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1639 PMD_RX_LOG(ERR, rxq,
1640 "Outer L3 csum failed, flags = 0x%x\n",
1642 rxq->rx_hw_errors++;
1643 ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1645 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1648 flags = fp_cqe->tunnel_pars_flags.flags;
1652 qede_rx_cqe_to_tunn_pkt_type(flags);
1656 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1658 /* Outer L3/L4 types are not available in the CQE */
1659 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1661 /* Outer L3/L4 types are not available in the CQE.
1662 * Need to add the offset to parse them correctly,
1664 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1665 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1667 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1670 /* Common handling for non-tunnel packets and for inner
1671 * headers in the case of tunnel.
1673 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1674 PMD_RX_LOG(ERR, rxq,
1675 "L4 csum failed, flags = 0x%x\n",
1677 rxq->rx_hw_errors++;
1678 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1680 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1682 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1683 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1685 rxq->rx_hw_errors++;
1686 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1688 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1691 if (unlikely(CQE_HAS_VLAN(parse_flag) ||
1692 CQE_HAS_OUTER_VLAN(parse_flag))) {
1693 /* Note: FW doesn't indicate Q-in-Q packet */
1694 ol_flags |= PKT_RX_VLAN;
1695 if (qdev->vlan_strip_flg) {
1696 ol_flags |= PKT_RX_VLAN_STRIPPED;
1697 rx_mb->vlan_tci = vlan_tci;
1702 ol_flags |= PKT_RX_RSS_HASH;
1703 rx_mb->hash.rss = rss_hash;
1707 qede_rx_bd_ring_consume(rxq);
1709 /* Prefetch next mbuf while processing current one. */
1710 preload_idx = rxq->sw_rx_cons & num_rx_bds;
1711 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1713 /* Update rest of the MBUF fields */
1714 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1715 rx_mb->port = port_id;
1716 rx_mb->ol_flags = ol_flags;
1717 rx_mb->data_len = len;
1718 rx_mb->packet_type = packet_type;
1719 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1720 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1722 rx_mb->nb_segs = bd_num;
1723 rx_mb->pkt_len = pkt_len;
1725 rx_pkts[rx_pkt] = rx_mb;
1729 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1730 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1731 if (rx_pkt == nb_pkts) {
1732 PMD_RX_LOG(DEBUG, rxq,
1733 "Budget reached nb_pkts=%u received=%u",
1739 /* Request the number of buffers to be allocated in the next loop */
1740 rxq->rx_alloc_count = rx_alloc_count;
1742 rxq->rcv_pkts += rx_pkt;
1743 rxq->rx_segs += rx_pkt;
1744 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1750 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1752 struct qede_rx_queue *rxq = p_rxq;
1753 struct qede_dev *qdev = rxq->qdev;
1754 struct ecore_dev *edev = &qdev->edev;
1755 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1756 uint16_t rx_pkt = 0;
1757 union eth_rx_cqe *cqe;
1758 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1759 register struct rte_mbuf *rx_mb = NULL;
1760 register struct rte_mbuf *seg1 = NULL;
1761 enum eth_rx_cqe_type cqe_type;
1762 uint16_t pkt_len = 0; /* Sum of all BD segments */
1763 uint16_t len; /* Length of first BD */
1764 uint8_t num_segs = 1;
1765 uint16_t preload_idx;
1766 uint16_t parse_flag;
1767 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1768 uint8_t bitfield_val;
1770 uint8_t tunn_parse_flag;
1771 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1773 uint32_t packet_type;
1776 uint8_t offset, tpa_agg_idx, flags;
1777 struct qede_agg_info *tpa_info = NULL;
1779 int rx_alloc_count = 0;
1782 /* Allocate buffers that we used in previous loop */
1783 if (rxq->rx_alloc_count) {
1784 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
1785 rxq->rx_alloc_count))) {
1786 struct rte_eth_dev *dev;
1788 PMD_RX_LOG(ERR, rxq,
1789 "New buffer allocation failed,"
1790 "dropping incoming packetn");
1791 dev = &rte_eth_devices[rxq->port_id];
1792 dev->data->rx_mbuf_alloc_failed +=
1793 rxq->rx_alloc_count;
1794 rxq->rx_alloc_errors += rxq->rx_alloc_count;
1797 qede_update_rx_prod(qdev, rxq);
1798 rxq->rx_alloc_count = 0;
1801 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1802 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1806 if (hw_comp_cons == sw_comp_cons)
1809 while (sw_comp_cons != hw_comp_cons) {
1811 packet_type = RTE_PTYPE_UNKNOWN;
1813 tpa_start_flg = false;
1816 /* Get the CQE from the completion ring */
1818 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1819 cqe_type = cqe->fast_path_regular.type;
1820 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1823 case ETH_RX_CQE_TYPE_REGULAR:
1824 fp_cqe = &cqe->fast_path_regular;
1826 case ETH_RX_CQE_TYPE_TPA_START:
1827 cqe_start_tpa = &cqe->fast_path_tpa_start;
1828 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1829 tpa_start_flg = true;
1830 /* Mark it as LRO packet */
1831 ol_flags |= PKT_RX_LRO;
1832 /* In split mode, seg_len is the same as len_on_first_bd
1833 * and bw_ext_bd_len_list will be empty since there are
1834 * no additional buffers
1836 PMD_RX_LOG(INFO, rxq,
1837 "TPA start[%d] - len_on_first_bd %d header %d"
1838 " [bd_list[0] %d], [seg_len %d]\n",
1839 cqe_start_tpa->tpa_agg_index,
1840 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1841 cqe_start_tpa->header_len,
1842 rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
1843 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1846 case ETH_RX_CQE_TYPE_TPA_CONT:
1847 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1848 &cqe->fast_path_tpa_cont);
1850 case ETH_RX_CQE_TYPE_TPA_END:
1851 qede_rx_process_tpa_end_cqe(qdev, rxq,
1852 &cqe->fast_path_tpa_end);
1853 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1854 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1855 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1857 case ETH_RX_CQE_TYPE_SLOW_PATH:
1858 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1859 ecore_eth_cqe_completion(
1860 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1861 (struct eth_slow_path_rx_cqe *)cqe);
1867 /* Get the data from the SW ring */
1868 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1869 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1870 assert(rx_mb != NULL);
1872 /* Handle regular CQE or TPA start CQE */
1873 if (!tpa_start_flg) {
1874 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1875 offset = fp_cqe->placement_offset;
1876 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1877 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1878 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1879 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1880 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1881 bitfield_val = fp_cqe->bitfields;
1885 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1886 offset = cqe_start_tpa->placement_offset;
1887 /* seg_len = len_on_first_bd */
1888 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1889 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1890 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1891 bitfield_val = cqe_start_tpa->bitfields;
1893 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1895 if (qede_tunn_exist(parse_flag)) {
1896 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1897 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1898 PMD_RX_LOG(ERR, rxq,
1899 "L4 csum failed, flags = 0x%x\n",
1901 rxq->rx_hw_errors++;
1902 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1904 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1907 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1908 PMD_RX_LOG(ERR, rxq,
1909 "Outer L3 csum failed, flags = 0x%x\n",
1911 rxq->rx_hw_errors++;
1912 ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1914 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1918 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1920 flags = fp_cqe->tunnel_pars_flags.flags;
1921 tunn_parse_flag = flags;
1925 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1929 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1931 /* Outer L3/L4 types are not available in the CQE */
1932 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1934 /* Outer L3/L4 types are not available in the CQE.
1935 * Need to add the offset to parse them correctly,
1937 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1938 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1940 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1943 /* Common handling for non-tunnel packets and for inner
1944 * headers in the case of tunnel.
1946 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1947 PMD_RX_LOG(ERR, rxq,
1948 "L4 csum failed, flags = 0x%x\n",
1950 rxq->rx_hw_errors++;
1951 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1953 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1955 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1956 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1958 rxq->rx_hw_errors++;
1959 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1961 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1964 if (CQE_HAS_VLAN(parse_flag) ||
1965 CQE_HAS_OUTER_VLAN(parse_flag)) {
1966 /* Note: FW doesn't indicate Q-in-Q packet */
1967 ol_flags |= PKT_RX_VLAN;
1968 if (qdev->vlan_strip_flg) {
1969 ol_flags |= PKT_RX_VLAN_STRIPPED;
1970 rx_mb->vlan_tci = vlan_tci;
1975 if (qdev->rss_enable) {
1976 ol_flags |= PKT_RX_RSS_HASH;
1977 rx_mb->hash.rss = rss_hash;
1981 qede_rx_bd_ring_consume(rxq);
1983 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1984 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1985 " len on first: %04x Total Len: %04x",
1986 fp_cqe->bd_num, len, pkt_len);
1987 num_segs = fp_cqe->bd_num - 1;
1989 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1993 rx_alloc_count += num_segs;
1994 rxq->rx_segs += num_segs;
1996 rxq->rx_segs++; /* for the first segment */
1998 /* Prefetch next mbuf while processing current one. */
1999 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
2000 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
2002 /* Update rest of the MBUF fields */
2003 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
2004 rx_mb->port = rxq->port_id;
2005 rx_mb->ol_flags = ol_flags;
2006 rx_mb->data_len = len;
2007 rx_mb->packet_type = packet_type;
2008 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
2009 print_rx_bd_info(rx_mb, rxq, bitfield_val);
2011 if (!tpa_start_flg) {
2012 rx_mb->nb_segs = fp_cqe->bd_num;
2013 rx_mb->pkt_len = pkt_len;
2015 /* store ref to the updated mbuf */
2016 tpa_info->tpa_head = rx_mb;
2017 tpa_info->tpa_tail = tpa_info->tpa_head;
2019 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
2021 if (!tpa_start_flg) {
2022 rx_pkts[rx_pkt] = rx_mb;
2026 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
2027 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2028 if (rx_pkt == nb_pkts) {
2029 PMD_RX_LOG(DEBUG, rxq,
2030 "Budget reached nb_pkts=%u received=%u",
2036 /* Request the number of buffers to be allocated in the next loop */
2037 rxq->rx_alloc_count = rx_alloc_count;
2039 rxq->rcv_pkts += rx_pkt;
2041 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
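/* For CMT (two-engine) devices the receive burst is split across the two
 * per-engine Rx queues: engine 0 gets up to nb_pkts / 2 packets and engine 1
 * gets whatever budget remains, as shown below.
 */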
2047 qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2049 struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2050 uint16_t eng0_pkts, eng1_pkts;
2052 eng0_pkts = nb_pkts / 2;
2054 eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
2056 eng1_pkts = nb_pkts - eng0_pkts;
2058 eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
2061 return eng0_pkts + eng1_pkts;
2064 /* Populate scatter gather buffer descriptor fields */
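/* Segment 0 of the remainder is placed in the 2nd BD and segment 1 in the
 * 3rd BD (both of which can also carry offload metadata, hence they are
 * passed in by the caller); every further segment gets a plain
 * struct eth_tx_bd produced from the chain.
 */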
2065 static inline uint16_t
2066 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
2067 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
2070 struct qede_tx_queue *txq = p_txq;
2071 struct eth_tx_bd *tx_bd = NULL;
2073 uint16_t nb_segs = 0;
2075 /* Check for scattered buffers */
2077 if (start_seg == 0) {
2079 *bd2 = (struct eth_tx_2nd_bd *)
2080 ecore_chain_produce(&txq->tx_pbl);
2081 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
2084 mapping = rte_mbuf_data_iova(m_seg);
2085 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
2086 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
2087 } else if (start_seg == 1) {
2089 *bd3 = (struct eth_tx_3rd_bd *)
2090 ecore_chain_produce(&txq->tx_pbl);
2091 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
2094 mapping = rte_mbuf_data_iova(m_seg);
2095 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
2096 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
2098 tx_bd = (struct eth_tx_bd *)
2099 ecore_chain_produce(&txq->tx_pbl);
2100 memset(tx_bd, 0, sizeof(*tx_bd));
2102 mapping = rte_mbuf_data_iova(m_seg);
2103 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
2104 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
2107 m_seg = m_seg->next;
2110 /* Return total scattered buffers */
2114 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2116 print_tx_bd_info(struct qede_tx_queue *txq,
2117 struct eth_tx_1st_bd *bd1,
2118 struct eth_tx_2nd_bd *bd2,
2119 struct eth_tx_3rd_bd *bd3,
2120 uint64_t tx_ol_flags)
2122 char ol_buf[256] = { 0 }; /* for verbose prints */
2125 PMD_TX_LOG(INFO, txq,
2126 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
2127 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
2128 bd1->data.bd_flags.bitfields,
2129 rte_cpu_to_le_16(bd1->data.bitfields));
2131 PMD_TX_LOG(INFO, txq,
2132 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
2133 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
2134 bd2->data.bitfields2, bd2->data.tunn_ip_size);
2136 PMD_TX_LOG(INFO, txq,
2137 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
2138 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
2139 rte_cpu_to_le_16(bd3->nbytes),
2140 rte_cpu_to_le_16(bd3->data.bitfields),
2141 rte_cpu_to_le_16(bd3->data.lso_mss),
2142 bd3->data.tunn_l4_hdr_start_offset_w,
2143 bd3->data.tunn_hdr_size_w);
2145 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
2146 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
2150 /* TX prepare: check that packets meet the TX conditions */
2152 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2153 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
2156 struct qede_tx_queue *txq = p_txq;
2158 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
2165 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2169 for (i = 0; i < nb_pkts; i++) {
2171 ol_flags = m->ol_flags;
2172 if (ol_flags & PKT_TX_TCP_SEG) {
2173 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
2177 /* TBD: confirm it is ~9700B for both? */
2178 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
2183 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
2188 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
2189 /* We support only a limited set of tunnel protocols */
2190 if (ol_flags & PKT_TX_TUNNEL_MASK) {
2193 temp = ol_flags & PKT_TX_TUNNEL_MASK;
2194 if (temp == PKT_TX_TUNNEL_VXLAN ||
2195 temp == PKT_TX_TUNNEL_GENEVE ||
2196 temp == PKT_TX_TUNNEL_MPLSINUDP ||
2197 temp == PKT_TX_TUNNEL_GRE)
2201 rte_errno = ENOTSUP;
2205 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2206 ret = rte_validate_tx_offload(m);
2214 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2215 if (unlikely(i != nb_pkts))
2216 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
2222 #define MPLSINUDP_HDR_SIZE (12)
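/* 12 bytes: an 8-byte UDP header plus one 4-byte MPLS label, matching the
 * single-label MPLS-in-UDP case this driver supports (see qede_xmit_pkts()).
 */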
2224 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2226 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
2227 struct qede_tx_queue *txq)
2229 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
2230 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
2231 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
2232 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
2233 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
2234 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
2235 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
2236 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
2237 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
2238 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
2239 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
2244 qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2246 struct qede_tx_queue *txq = p_txq;
2247 struct qede_dev *qdev = txq->qdev;
2248 struct ecore_dev *edev = &qdev->edev;
2249 struct eth_tx_1st_bd *bd1;
2250 struct eth_tx_2nd_bd *bd2;
2251 struct eth_tx_3rd_bd *bd3;
2252 struct rte_mbuf *m_seg = NULL;
2253 struct rte_mbuf *mbuf;
2254 struct rte_mbuf **sw_tx_ring;
2255 uint16_t nb_tx_pkts;
2258 uint16_t nb_frags = 0;
2259 uint16_t nb_pkt_sent = 0;
2261 uint64_t tx_ol_flags;
2264 uint8_t bd1_bd_flags_bf;
2266 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2267 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2268 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2269 qede_process_tx_compl(edev, txq);
2272 nb_tx_pkts = nb_pkts;
2273 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2274 sw_tx_ring = txq->sw_tx_ring;
2276 while (nb_tx_pkts--) {
2277 /* Init flags/values */
2283 bd1_bd_flags_bf = 0;
2290 /* Check that enough TX BDs are available for all mbuf segments */
2291 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2294 tx_ol_flags = mbuf->ol_flags;
2295 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2297 if (unlikely(txq->nb_tx_avail <
2298 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2301 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2302 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2304 /* Offload the IP checksum in the hardware */
2305 if (tx_ol_flags & PKT_TX_IP_CKSUM)
2307 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2309 /* L4 checksum offload (tcp or udp) */
2310 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2311 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
2313 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2315 /* Fill the entry in the SW ring and the BDs in the FW ring */
2317 sw_tx_ring[idx] = mbuf;
2320 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2321 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2324 /* Map mbuf linear data for DMA and set it in BD1 */
2325 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2327 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2328 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2330 /* Handle fragmented MBUF */
2331 if (unlikely(mbuf->nb_segs > 1)) {
2334 /* Encode scatter gather buffer descriptors */
2335 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
2339 bd1->data.nbds = nbds + nb_frags;
2341 txq->nb_tx_avail -= bd1->data.nbds;
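/* Illustration (assumed 3-segment, non-LSO mbuf): BD1 plus two SG BDs from
 * qede_encode_sg_bd() gives bd1->data.nbds = 3, so nb_tx_avail drops by 3.
 */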
2344 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2345 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2346 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2352 /* Write the producer index into the doorbell data */
2353 txq->tx_db.data.bd_prod = bd_prod;
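/* The compiler barrier below keeps the BD and doorbell-data stores from
 * being reordered past the doorbell register write, so the HW observes a
 * consistent producer index when the doorbell rings.
 */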
2355 rte_compiler_barrier();
2356 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2359 /* Check again for Tx completions */
2360 qede_process_tx_compl(edev, txq);
2362 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2363 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2369 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2371 struct qede_tx_queue *txq = p_txq;
2372 struct qede_dev *qdev = txq->qdev;
2373 struct ecore_dev *edev = &qdev->edev;
2374 struct rte_mbuf *mbuf;
2375 struct rte_mbuf *m_seg = NULL;
2376 uint16_t nb_tx_pkts;
2380 uint16_t nb_pkt_sent = 0;
2384 __rte_unused bool tunn_flg;
2385 bool tunn_ipv6_ext_flg;
2386 struct eth_tx_1st_bd *bd1;
2387 struct eth_tx_2nd_bd *bd2;
2388 struct eth_tx_3rd_bd *bd3;
2389 uint64_t tx_ol_flags;
2393 uint8_t bd1_bd_flags_bf;
2402 uint8_t tunn_l4_hdr_start_offset;
2403 uint8_t tunn_hdr_size;
2404 uint8_t inner_l2_hdr_size;
2405 uint16_t inner_l4_hdr_offset;
2407 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2408 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2409 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2410 qede_process_tx_compl(edev, txq);
2413 nb_tx_pkts = nb_pkts;
2414 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2415 while (nb_tx_pkts--) {
2416 /* Init flags/values */
2426 bd1_bd_flags_bf = 0;
2431 mplsoudp_flg = false;
2432 tunn_ipv6_ext_flg = false;
2434 tunn_l4_hdr_start_offset = 0;
2439 /* Check that enough TX BDs are available for all mbuf segments */
2440 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2443 tx_ol_flags = mbuf->ol_flags;
2444 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2446 /* TX prepare would have already checked supported tunnel Tx
2447 * offloads. Don't rely on pkt_type marked by Rx, instead use
2448 * tx_ol_flags to decide.
2450 tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
2453 /* Check against the max, which is Tunnel IPv6 + ext */
2454 if (unlikely(txq->nb_tx_avail <
2455 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
2458 /* First indicate that it's a tunnel pkt */
2459 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
2460 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2461 /* Legacy FW had flipped behavior with regard to this bit,
2462 * i.e. it needed to be set to prevent the FW from touching
2463 * encapsulated packets when it didn't need to.
2465 if (unlikely(txq->is_legacy)) {
2467 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2470 /* Outer IP checksum offload */
2471 if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
2472 PKT_TX_OUTER_IPV4)) {
2474 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
2475 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
2479 * Currently, only inner checksum offload in MPLS-in-UDP
2480 * tunnel with one MPLS label is supported. Both outer
2481 * and inner layer lengths need to be provided in
2484 if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
2485 PKT_TX_TUNNEL_MPLSINUDP) {
2486 mplsoudp_flg = true;
2487 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2488 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
2490 /* Outer L4 offset in two byte words */
2491 tunn_l4_hdr_start_offset =
2492 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
2493 /* Tunnel header size in two byte words */
2494 tunn_hdr_size = (mbuf->outer_l2_len +
2495 mbuf->outer_l3_len +
2496 MPLSINUDP_HDR_SIZE) / 2;
2497 /* Inner L2 header size in two byte words */
2498 inner_l2_hdr_size = (mbuf->l2_len -
2499 MPLSINUDP_HDR_SIZE) / 2;
2500 /* Inner L4 header offset from the beginning
2501 * of the inner packet in two byte words
2503 inner_l4_hdr_offset = (mbuf->l2_len -
2504 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
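/* Worked example (assumed header sizes): outer Ethernet 14B + outer IPv4
 * 20B, l2_len 26B (12B MPLS-in-UDP header + 14B inner Ethernet), inner
 * IPv4 l3_len 20B:
 *   tunn_l4_hdr_start_offset = (14 + 20) / 2      = 17 words
 *   tunn_hdr_size            = (14 + 20 + 12) / 2 = 23 words
 *   inner_l2_hdr_size        = (26 - 12) / 2      = 7 words
 *   inner_l4_hdr_offset      = (26 - 12 + 20) / 2 = 17 words
 */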
2506 /* Inner L2 size and address type */
2507 bd2_bf1 |= (inner_l2_hdr_size &
2508 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
2509 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
2510 bd2_bf1 |= (UNICAST_ADDRESS &
2511 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
2512 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
2513 /* Treated as IPv6+Ext */
2515 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
2517 /* Mark inner IPv6 if present */
2518 if (tx_ol_flags & PKT_TX_IPV6)
2520 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
2522 /* Inner L4 offsets */
2523 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2524 (tx_ol_flags & (PKT_TX_UDP_CKSUM |
2525 PKT_TX_TCP_CKSUM))) {
2526 /* Determines if BD3 is needed */
2527 tunn_ipv6_ext_flg = true;
2528 if ((tx_ol_flags & PKT_TX_L4_MASK) ==
2531 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
2534 /* TODO other pseudo checksum modes are
2538 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
2539 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
2540 bd2_bf2 |= (inner_l4_hdr_offset &
2541 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
2542 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
2544 } /* End MPLSoUDP */
2545 } /* End Tunnel handling */
2547 if (tx_ol_flags & PKT_TX_TCP_SEG) {
2549 if (unlikely(txq->nb_tx_avail <
2550 ETH_TX_MIN_BDS_PER_LSO_PKT))
2552 /* For LSO, the packet header and payload must reside in
2553 * buffers pointed to by different BDs. BD1 is used for the
2554 * header and BD2 onwards for the data.
2556 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2558 hdr_size += mbuf->outer_l2_len +
2561 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2563 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2564 /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
2566 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2567 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2568 /* Using one header BD */
2569 bd3_bf |= rte_cpu_to_le_16(1 <<
2570 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2572 if (unlikely(txq->nb_tx_avail <
2573 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2576 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2577 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2580 /* Descriptor based VLAN insertion */
2581 if (tx_ol_flags & PKT_TX_VLAN_PKT) {
2582 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2584 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2587 /* Offload the IP checksum in the hardware */
2588 if (tx_ol_flags & PKT_TX_IP_CKSUM) {
2590 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2591 /* There's no DPDK flag to request outer-L4 csum
2592 * offload. But in the case of a tunnel, if inner L3 or L4
2593 * csum offload is requested then we also need to force
2594 * recalculation of the outer (tunnel) L4 header csum.
2596 if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
2597 PKT_TX_TUNNEL_GRE)) {
2599 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2600 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2604 /* L4 checksum offload (tcp or udp) */
2605 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2606 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
2608 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2609 /* There's no DPDK flag to request outer-L4 csum
2610 * offload. But in the case of a tunnel, if inner L3 or L4
2611 * csum offload is requested then we also need to force
2612 * recalculation of the outer (tunnel) L4 header csum.
2616 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2617 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2621 /* Fill the entry in the SW ring and the BDs in the FW ring */
2623 txq->sw_tx_ring[idx] = mbuf;
2626 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2627 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2630 /* Map mbuf linear data for DMA and set it in BD1 */
2631 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2633 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2634 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2635 bd1->data.vlan = vlan;
2637 if (lso_flg || mplsoudp_flg) {
2638 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2640 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2644 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2647 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2648 rte_mbuf_data_iova(mbuf)),
2649 mbuf->data_len - hdr_size);
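/* Illustration (assumed plain TCP/IPv4 LSO, no tunnel): l2_len 14 +
 * l3_len 20 + l4_len 20 gives hdr_size = 54, so BD1 describes the 54-byte
 * header and BD2 the remaining data_len - 54 payload bytes of the first
 * segment; any further segments are chained via qede_encode_sg_bd().
 */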
2650 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2652 bd2->data.bitfields2 =
2653 rte_cpu_to_le_16(bd2_bf2);
2655 bd2->data.tunn_ip_size =
2656 rte_cpu_to_le_16(mbuf->outer_l3_len);
2659 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2660 bd3 = (struct eth_tx_3rd_bd *)
2661 ecore_chain_produce(&txq->tx_pbl);
2662 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2664 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2666 bd3->data.lso_mss = mss;
2668 bd3->data.tunn_l4_hdr_start_offset_w =
2669 tunn_l4_hdr_start_offset;
2670 bd3->data.tunn_hdr_size_w =
2676 /* Handle fragmented MBUF */
2679 /* Encode scatter gather buffer descriptors if required */
2680 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2681 bd1->data.nbds = nbds + nb_frags;
2683 txq->nb_tx_avail -= bd1->data.nbds;
2686 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2687 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2688 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2694 /* Write the producer index into the doorbell data */
2695 txq->tx_db.data.bd_prod = bd_prod;
2697 rte_compiler_barrier();
2698 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2701 /* Check again for Tx completions */
2702 qede_process_tx_compl(edev, txq);
2704 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2705 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2711 qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2713 struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2714 uint16_t eng0_pkts, eng1_pkts;
2716 eng0_pkts = nb_pkts / 2;
2718 eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
2720 eng1_pkts = nb_pkts - eng0_pkts;
2722 eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
2725 return eng0_pkts + eng1_pkts;
2729 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
2730 __rte_unused struct rte_mbuf **pkts,
2731 __rte_unused uint16_t nb_pkts)
2737 /* This function does a dummy walk through the completion queue
2738 * to count the number of BDs used by the HW.
2739 * At the end, it restores the completion queue to its original state.
2742 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2744 uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2745 union eth_rx_cqe *cqe, *orig_cqe = NULL;
2747 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2748 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2750 if (hw_comp_cons == sw_comp_cons)
2753 /* Get the CQE from the completion ring */
2754 cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2757 while (sw_comp_cons != hw_comp_cons) {
2758 switch (cqe->fast_path_regular.type) {
2759 case ETH_RX_CQE_TYPE_REGULAR:
2760 bd_count += cqe->fast_path_regular.bd_num;
2762 case ETH_RX_CQE_TYPE_TPA_END:
2763 bd_count += cqe->fast_path_tpa_end.num_of_bds;
2770 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2771 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2774 /* revert comp_ring to original state */
2775 ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
2781 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2783 uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2784 uint16_t produced, consumed;
2785 struct qede_rx_queue *rxq = p_rxq;
2787 if (offset > rxq->nb_rx_desc)
2790 sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2791 sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2793 /* find BDs used by HW from completion queue elements */
2794 hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
2796 if (hw_bd_cons < sw_bd_cons)
2797 /* wraparound case */
2798 consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2800 consumed = hw_bd_cons - sw_bd_cons;
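/* Example of the arithmetic above: sw_bd_cons = 0xfff0 and
 * hw_bd_cons = 0x0010 takes the wraparound branch and gives
 * consumed = (0xffff - 0xfff0) + 0x0010 = 0x001f.
 */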
2802 if (offset <= consumed)
2803 return RTE_ETH_RX_DESC_DONE;
2805 if (sw_bd_prod < sw_bd_cons)
2806 /* wraparound case */
2807 produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2809 produced = sw_bd_prod - sw_bd_cons;
2811 if (offset <= produced)
2812 return RTE_ETH_RX_DESC_AVAIL;
2814 return RTE_ETH_RX_DESC_UNAVAIL;
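/* A status check of this kind is normally reached through the generic
 * ethdev API, e.g. (illustrative only, ids are caller-supplied):
 *   int st = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
 * which reports the RTE_ETH_RX_DESC_DONE/AVAIL/UNAVAIL values computed here.
 */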